{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.273972602739725,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00684931506849315,
"grad_norm": 3.5625,
"learning_rate": 9.132420091324201e-07,
"loss": 3.0017,
"step": 1
},
{
"epoch": 0.03424657534246575,
"grad_norm": 2.9375,
"learning_rate": 4.566210045662101e-06,
"loss": 3.0725,
"step": 5
},
{
"epoch": 0.0684931506849315,
"grad_norm": 3.078125,
"learning_rate": 9.132420091324201e-06,
"loss": 3.0374,
"step": 10
},
{
"epoch": 0.10273972602739725,
"grad_norm": 2.515625,
"learning_rate": 1.3698630136986302e-05,
"loss": 3.0044,
"step": 15
},
{
"epoch": 0.136986301369863,
"grad_norm": 2.3125,
"learning_rate": 1.8264840182648402e-05,
"loss": 2.9373,
"step": 20
},
{
"epoch": 0.17123287671232876,
"grad_norm": 4.90625,
"learning_rate": 2.2831050228310503e-05,
"loss": 2.7849,
"step": 25
},
{
"epoch": 0.2054794520547945,
"grad_norm": 17.0,
"learning_rate": 2.7397260273972603e-05,
"loss": 2.6263,
"step": 30
},
{
"epoch": 0.23972602739726026,
"grad_norm": 1.0859375,
"learning_rate": 3.1963470319634704e-05,
"loss": 2.4603,
"step": 35
},
{
"epoch": 0.273972602739726,
"grad_norm": 1.75,
"learning_rate": 3.6529680365296805e-05,
"loss": 2.3423,
"step": 40
},
{
"epoch": 0.3082191780821918,
"grad_norm": 3.0,
"learning_rate": 4.1095890410958905e-05,
"loss": 2.2364,
"step": 45
},
{
"epoch": 0.3424657534246575,
"grad_norm": 1.0546875,
"learning_rate": 4.5662100456621006e-05,
"loss": 2.0795,
"step": 50
},
{
"epoch": 0.3767123287671233,
"grad_norm": 1.734375,
"learning_rate": 5.0228310502283106e-05,
"loss": 1.9497,
"step": 55
},
{
"epoch": 0.410958904109589,
"grad_norm": 1.25,
"learning_rate": 5.479452054794521e-05,
"loss": 1.8556,
"step": 60
},
{
"epoch": 0.4452054794520548,
"grad_norm": 0.640625,
"learning_rate": 5.936073059360731e-05,
"loss": 1.759,
"step": 65
},
{
"epoch": 0.4794520547945205,
"grad_norm": 0.97265625,
"learning_rate": 6.392694063926941e-05,
"loss": 1.6773,
"step": 70
},
{
"epoch": 0.5136986301369864,
"grad_norm": 1.9296875,
"learning_rate": 6.84931506849315e-05,
"loss": 1.6105,
"step": 75
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.51171875,
"learning_rate": 7.305936073059361e-05,
"loss": 1.5517,
"step": 80
},
{
"epoch": 0.5821917808219178,
"grad_norm": 0.45703125,
"learning_rate": 7.76255707762557e-05,
"loss": 1.4895,
"step": 85
},
{
"epoch": 0.6164383561643836,
"grad_norm": 0.326171875,
"learning_rate": 8.219178082191781e-05,
"loss": 1.466,
"step": 90
},
{
"epoch": 0.6506849315068494,
"grad_norm": 0.283203125,
"learning_rate": 8.67579908675799e-05,
"loss": 1.4237,
"step": 95
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.333984375,
"learning_rate": 9.132420091324201e-05,
"loss": 1.3836,
"step": 100
},
{
"epoch": 0.7191780821917808,
"grad_norm": 0.578125,
"learning_rate": 9.58904109589041e-05,
"loss": 1.3655,
"step": 105
},
{
"epoch": 0.7534246575342466,
"grad_norm": 0.484375,
"learning_rate": 0.00010045662100456621,
"loss": 1.3369,
"step": 110
},
{
"epoch": 0.7876712328767124,
"grad_norm": 0.3671875,
"learning_rate": 0.00010502283105022832,
"loss": 1.3149,
"step": 115
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.9765625,
"learning_rate": 0.00010958904109589041,
"loss": 1.3051,
"step": 120
},
{
"epoch": 0.8561643835616438,
"grad_norm": 0.74609375,
"learning_rate": 0.00011415525114155252,
"loss": 1.2835,
"step": 125
},
{
"epoch": 0.8904109589041096,
"grad_norm": 0.271484375,
"learning_rate": 0.00011872146118721462,
"loss": 1.2805,
"step": 130
},
{
"epoch": 0.9246575342465754,
"grad_norm": 0.82421875,
"learning_rate": 0.0001232876712328767,
"loss": 1.2617,
"step": 135
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.498046875,
"learning_rate": 0.00012785388127853882,
"loss": 1.2659,
"step": 140
},
{
"epoch": 0.9931506849315068,
"grad_norm": 0.28125,
"learning_rate": 0.00013242009132420092,
"loss": 1.2474,
"step": 145
},
{
"epoch": 1.0,
"eval_loss": 2.523677110671997,
"eval_runtime": 0.5573,
"eval_samples_per_second": 17.944,
"eval_steps_per_second": 1.794,
"step": 146
},
{
"epoch": 1.0273972602739727,
"grad_norm": 0.58984375,
"learning_rate": 0.000136986301369863,
"loss": 1.2351,
"step": 150
},
{
"epoch": 1.0616438356164384,
"grad_norm": 0.5234375,
"learning_rate": 0.0001415525114155251,
"loss": 1.2256,
"step": 155
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.55859375,
"learning_rate": 0.00014611872146118722,
"loss": 1.2203,
"step": 160
},
{
"epoch": 1.13013698630137,
"grad_norm": 0.35546875,
"learning_rate": 0.00015068493150684933,
"loss": 1.1994,
"step": 165
},
{
"epoch": 1.1643835616438356,
"grad_norm": 0.345703125,
"learning_rate": 0.0001552511415525114,
"loss": 1.2069,
"step": 170
},
{
"epoch": 1.1986301369863013,
"grad_norm": 0.412109375,
"learning_rate": 0.00015981735159817351,
"loss": 1.1912,
"step": 175
},
{
"epoch": 1.2328767123287672,
"grad_norm": 0.365234375,
"learning_rate": 0.00016438356164383562,
"loss": 1.1879,
"step": 180
},
{
"epoch": 1.2671232876712328,
"grad_norm": 0.42578125,
"learning_rate": 0.00016894977168949773,
"loss": 1.1983,
"step": 185
},
{
"epoch": 1.3013698630136985,
"grad_norm": 0.63671875,
"learning_rate": 0.0001735159817351598,
"loss": 1.1872,
"step": 190
},
{
"epoch": 1.3356164383561644,
"grad_norm": 0.376953125,
"learning_rate": 0.00017808219178082192,
"loss": 1.1806,
"step": 195
},
{
"epoch": 1.36986301369863,
"grad_norm": 1.1640625,
"learning_rate": 0.00018264840182648402,
"loss": 1.1849,
"step": 200
},
{
"epoch": 1.404109589041096,
"grad_norm": 1.046875,
"learning_rate": 0.00018721461187214613,
"loss": 1.1782,
"step": 205
},
{
"epoch": 1.4383561643835616,
"grad_norm": 0.373046875,
"learning_rate": 0.0001917808219178082,
"loss": 1.1727,
"step": 210
},
{
"epoch": 1.4726027397260273,
"grad_norm": 0.482421875,
"learning_rate": 0.00019634703196347032,
"loss": 1.1725,
"step": 215
},
{
"epoch": 1.5068493150684932,
"grad_norm": 0.80859375,
"learning_rate": 0.00019999987297289245,
"loss": 1.1611,
"step": 220
},
{
"epoch": 1.541095890410959,
"grad_norm": 0.56640625,
"learning_rate": 0.00019999542705801296,
"loss": 1.1642,
"step": 225
},
{
"epoch": 1.5753424657534247,
"grad_norm": 0.361328125,
"learning_rate": 0.00019998463011046926,
"loss": 1.1608,
"step": 230
},
{
"epoch": 1.6095890410958904,
"grad_norm": 0.76953125,
"learning_rate": 0.00019996748281601038,
"loss": 1.1563,
"step": 235
},
{
"epoch": 1.643835616438356,
"grad_norm": 0.388671875,
"learning_rate": 0.00019994398626371643,
"loss": 1.1457,
"step": 240
},
{
"epoch": 1.678082191780822,
"grad_norm": 0.45703125,
"learning_rate": 0.0001999141419459293,
"loss": 1.1609,
"step": 245
},
{
"epoch": 1.7123287671232876,
"grad_norm": 0.70703125,
"learning_rate": 0.00019987795175815807,
"loss": 1.1479,
"step": 250
},
{
"epoch": 1.7465753424657535,
"grad_norm": 0.451171875,
"learning_rate": 0.0001998354179989585,
"loss": 1.148,
"step": 255
},
{
"epoch": 1.7808219178082192,
"grad_norm": 0.421875,
"learning_rate": 0.0001997865433697871,
"loss": 1.1513,
"step": 260
},
{
"epoch": 1.8150684931506849,
"grad_norm": 0.64453125,
"learning_rate": 0.00019973133097482947,
"loss": 1.1327,
"step": 265
},
{
"epoch": 1.8493150684931505,
"grad_norm": 0.326171875,
"learning_rate": 0.00019966978432080316,
"loss": 1.1424,
"step": 270
},
{
"epoch": 1.8835616438356164,
"grad_norm": 0.4375,
"learning_rate": 0.00019960190731673505,
"loss": 1.1387,
"step": 275
},
{
"epoch": 1.9178082191780823,
"grad_norm": 0.34765625,
"learning_rate": 0.00019952770427371304,
"loss": 1.1258,
"step": 280
},
{
"epoch": 1.952054794520548,
"grad_norm": 0.447265625,
"learning_rate": 0.00019944717990461207,
"loss": 1.1226,
"step": 285
},
{
"epoch": 1.9863013698630136,
"grad_norm": 0.427734375,
"learning_rate": 0.00019936033932379504,
"loss": 1.1269,
"step": 290
},
{
"epoch": 2.0,
"eval_loss": 2.4804677963256836,
"eval_runtime": 0.5614,
"eval_samples_per_second": 17.814,
"eval_steps_per_second": 1.781,
"step": 292
},
{
"epoch": 2.0205479452054793,
"grad_norm": 0.4609375,
"learning_rate": 0.00019926718804678785,
"loss": 1.1225,
"step": 295
},
{
"epoch": 2.0547945205479454,
"grad_norm": 0.435546875,
"learning_rate": 0.000199167731989929,
"loss": 1.1022,
"step": 300
},
{
"epoch": 2.089041095890411,
"grad_norm": 0.4140625,
"learning_rate": 0.00019906197746999408,
"loss": 1.1012,
"step": 305
},
{
"epoch": 2.1232876712328768,
"grad_norm": 0.3515625,
"learning_rate": 0.00019894993120379435,
"loss": 1.0928,
"step": 310
},
{
"epoch": 2.1575342465753424,
"grad_norm": 0.43359375,
"learning_rate": 0.00019883160030775016,
"loss": 1.1032,
"step": 315
},
{
"epoch": 2.191780821917808,
"grad_norm": 0.96484375,
"learning_rate": 0.00019870699229743911,
"loss": 1.0966,
"step": 320
},
{
"epoch": 2.2260273972602738,
"grad_norm": 0.81640625,
"learning_rate": 0.0001985761150871185,
"loss": 1.0952,
"step": 325
},
{
"epoch": 2.26027397260274,
"grad_norm": 0.462890625,
"learning_rate": 0.00019843897698922284,
"loss": 1.0936,
"step": 330
},
{
"epoch": 2.2945205479452055,
"grad_norm": 0.3203125,
"learning_rate": 0.00019829558671383585,
"loss": 1.0938,
"step": 335
},
{
"epoch": 2.328767123287671,
"grad_norm": 0.494140625,
"learning_rate": 0.00019814595336813725,
"loss": 1.0856,
"step": 340
},
{
"epoch": 2.363013698630137,
"grad_norm": 0.353515625,
"learning_rate": 0.0001979900864558242,
"loss": 1.0851,
"step": 345
},
{
"epoch": 2.3972602739726026,
"grad_norm": 0.3359375,
"learning_rate": 0.00019782799587650805,
"loss": 1.1018,
"step": 350
},
{
"epoch": 2.4315068493150687,
"grad_norm": 0.39453125,
"learning_rate": 0.00019765969192508508,
"loss": 1.0882,
"step": 355
},
{
"epoch": 2.4657534246575343,
"grad_norm": 0.341796875,
"learning_rate": 0.00019748518529108316,
"loss": 1.0932,
"step": 360
},
{
"epoch": 2.5,
"grad_norm": 0.404296875,
"learning_rate": 0.00019730448705798239,
"loss": 1.0945,
"step": 365
},
{
"epoch": 2.5342465753424657,
"grad_norm": 0.35546875,
"learning_rate": 0.00019711760870251143,
"loss": 1.0881,
"step": 370
},
{
"epoch": 2.5684931506849313,
"grad_norm": 0.40234375,
"learning_rate": 0.00019692456209391846,
"loss": 1.0802,
"step": 375
},
{
"epoch": 2.602739726027397,
"grad_norm": 0.52734375,
"learning_rate": 0.0001967253594932173,
"loss": 1.0822,
"step": 380
},
{
"epoch": 2.636986301369863,
"grad_norm": 0.337890625,
"learning_rate": 0.00019652001355240878,
"loss": 1.0907,
"step": 385
},
{
"epoch": 2.671232876712329,
"grad_norm": 0.373046875,
"learning_rate": 0.00019630853731367713,
"loss": 1.0868,
"step": 390
},
{
"epoch": 2.7054794520547945,
"grad_norm": 0.40234375,
"learning_rate": 0.0001960909442085615,
"loss": 1.086,
"step": 395
},
{
"epoch": 2.73972602739726,
"grad_norm": 0.384765625,
"learning_rate": 0.00019586724805710306,
"loss": 1.0746,
"step": 400
},
{
"epoch": 2.7739726027397262,
"grad_norm": 0.353515625,
"learning_rate": 0.0001956374630669672,
"loss": 1.0832,
"step": 405
},
{
"epoch": 2.808219178082192,
"grad_norm": 0.34765625,
"learning_rate": 0.00019540160383254107,
"loss": 1.0753,
"step": 410
},
{
"epoch": 2.8424657534246576,
"grad_norm": 0.328125,
"learning_rate": 0.00019515968533400673,
"loss": 1.0844,
"step": 415
},
{
"epoch": 2.8767123287671232,
"grad_norm": 0.34765625,
"learning_rate": 0.00019491172293638968,
"loss": 1.083,
"step": 420
},
{
"epoch": 2.910958904109589,
"grad_norm": 0.369140625,
"learning_rate": 0.00019465773238858298,
"loss": 1.0757,
"step": 425
},
{
"epoch": 2.9452054794520546,
"grad_norm": 0.56640625,
"learning_rate": 0.00019439772982234697,
"loss": 1.075,
"step": 430
},
{
"epoch": 2.9794520547945207,
"grad_norm": 3.71875,
"learning_rate": 0.00019413173175128473,
"loss": 1.0909,
"step": 435
},
{
"epoch": 3.0,
"eval_loss": 2.4892916679382324,
"eval_runtime": 0.5522,
"eval_samples_per_second": 18.108,
"eval_steps_per_second": 1.811,
"step": 438
},
{
"epoch": 3.0136986301369864,
"grad_norm": 1.3046875,
"learning_rate": 0.0001938597550697932,
"loss": 1.0635,
"step": 440
},
{
"epoch": 3.047945205479452,
"grad_norm": 0.3984375,
"learning_rate": 0.00019358181705199015,
"loss": 1.0518,
"step": 445
},
{
"epoch": 3.0821917808219177,
"grad_norm": 0.369140625,
"learning_rate": 0.00019329793535061723,
"loss": 1.0509,
"step": 450
},
{
"epoch": 3.1164383561643834,
"grad_norm": 0.412109375,
"learning_rate": 0.00019300812799591846,
"loss": 1.0529,
"step": 455
},
{
"epoch": 3.1506849315068495,
"grad_norm": 0.66015625,
"learning_rate": 0.00019271241339449536,
"loss": 1.0416,
"step": 460
},
{
"epoch": 3.184931506849315,
"grad_norm": 0.89453125,
"learning_rate": 0.00019241081032813772,
"loss": 1.0488,
"step": 465
},
{
"epoch": 3.219178082191781,
"grad_norm": 0.55078125,
"learning_rate": 0.00019210333795263075,
"loss": 1.0402,
"step": 470
},
{
"epoch": 3.2534246575342465,
"grad_norm": 0.73046875,
"learning_rate": 0.00019179001579653853,
"loss": 1.0568,
"step": 475
},
{
"epoch": 3.287671232876712,
"grad_norm": 1.0390625,
"learning_rate": 0.0001914708637599636,
"loss": 1.0487,
"step": 480
},
{
"epoch": 3.3219178082191783,
"grad_norm": 0.400390625,
"learning_rate": 0.00019114590211328288,
"loss": 1.0468,
"step": 485
},
{
"epoch": 3.356164383561644,
"grad_norm": 0.439453125,
"learning_rate": 0.0001908151514958606,
"loss": 1.0538,
"step": 490
},
{
"epoch": 3.3904109589041096,
"grad_norm": 0.37109375,
"learning_rate": 0.00019047863291473717,
"loss": 1.0441,
"step": 495
},
{
"epoch": 3.4246575342465753,
"grad_norm": 0.34765625,
"learning_rate": 0.00019013636774329495,
"loss": 1.0521,
"step": 500
},
{
"epoch": 3.458904109589041,
"grad_norm": 0.4140625,
"learning_rate": 0.00018978837771990085,
"loss": 1.0405,
"step": 505
},
{
"epoch": 3.493150684931507,
"grad_norm": 0.4375,
"learning_rate": 0.0001894346849465257,
"loss": 1.0439,
"step": 510
},
{
"epoch": 3.5273972602739727,
"grad_norm": 0.349609375,
"learning_rate": 0.00018907531188734026,
"loss": 1.0525,
"step": 515
},
{
"epoch": 3.5616438356164384,
"grad_norm": 0.47265625,
"learning_rate": 0.00018871028136728874,
"loss": 1.0493,
"step": 520
},
{
"epoch": 3.595890410958904,
"grad_norm": 0.35546875,
"learning_rate": 0.00018833961657063885,
"loss": 1.0405,
"step": 525
},
{
"epoch": 3.6301369863013697,
"grad_norm": 0.50390625,
"learning_rate": 0.0001879633410395095,
"loss": 1.0452,
"step": 530
},
{
"epoch": 3.6643835616438354,
"grad_norm": 0.34765625,
"learning_rate": 0.00018758147867237548,
"loss": 1.0515,
"step": 535
},
{
"epoch": 3.6986301369863015,
"grad_norm": 0.421875,
"learning_rate": 0.00018719405372254948,
"loss": 1.0453,
"step": 540
},
{
"epoch": 3.732876712328767,
"grad_norm": 0.3359375,
"learning_rate": 0.00018680109079664188,
"loss": 1.0356,
"step": 545
},
{
"epoch": 3.767123287671233,
"grad_norm": 0.333984375,
"learning_rate": 0.0001864026148529978,
"loss": 1.0355,
"step": 550
},
{
"epoch": 3.8013698630136985,
"grad_norm": 0.427734375,
"learning_rate": 0.00018599865120011192,
"loss": 1.0452,
"step": 555
},
{
"epoch": 3.8356164383561646,
"grad_norm": 0.34375,
"learning_rate": 0.00018558922549502107,
"loss": 1.0258,
"step": 560
},
{
"epoch": 3.8698630136986303,
"grad_norm": 0.412109375,
"learning_rate": 0.0001851743637416747,
"loss": 1.0423,
"step": 565
},
{
"epoch": 3.904109589041096,
"grad_norm": 0.31640625,
"learning_rate": 0.00018475409228928312,
"loss": 1.0238,
"step": 570
},
{
"epoch": 3.9383561643835616,
"grad_norm": 0.400390625,
"learning_rate": 0.00018432843783064429,
"loss": 1.041,
"step": 575
},
{
"epoch": 3.9726027397260273,
"grad_norm": 0.412109375,
"learning_rate": 0.00018389742740044813,
"loss": 1.0354,
"step": 580
},
{
"epoch": 4.0,
"eval_loss": 2.5017333030700684,
"eval_runtime": 0.5568,
"eval_samples_per_second": 17.961,
"eval_steps_per_second": 1.796,
"step": 584
},
{
"epoch": 4.006849315068493,
"grad_norm": 0.52734375,
"learning_rate": 0.00018346108837355972,
"loss": 1.0411,
"step": 585
},
{
"epoch": 4.041095890410959,
"grad_norm": 0.41796875,
"learning_rate": 0.00018301944846328049,
"loss": 0.9963,
"step": 590
},
{
"epoch": 4.075342465753424,
"grad_norm": 0.36328125,
"learning_rate": 0.0001825725357195881,
"loss": 1.0137,
"step": 595
},
{
"epoch": 4.109589041095891,
"grad_norm": 0.48046875,
"learning_rate": 0.00018212037852735486,
"loss": 1.006,
"step": 600
},
{
"epoch": 4.1438356164383565,
"grad_norm": 0.4140625,
"learning_rate": 0.0001816630056045451,
"loss": 1.0075,
"step": 605
},
{
"epoch": 4.178082191780822,
"grad_norm": 0.353515625,
"learning_rate": 0.0001812004460003909,
"loss": 0.9975,
"step": 610
},
{
"epoch": 4.212328767123288,
"grad_norm": 0.365234375,
"learning_rate": 0.00018073272909354727,
"loss": 1.0171,
"step": 615
},
{
"epoch": 4.2465753424657535,
"grad_norm": 0.51171875,
"learning_rate": 0.0001802598845902262,
"loss": 0.9953,
"step": 620
},
{
"epoch": 4.280821917808219,
"grad_norm": 0.38671875,
"learning_rate": 0.00017978194252230985,
"loss": 1.008,
"step": 625
},
{
"epoch": 4.315068493150685,
"grad_norm": 0.359375,
"learning_rate": 0.00017929893324544332,
"loss": 0.9993,
"step": 630
},
{
"epoch": 4.3493150684931505,
"grad_norm": 0.56640625,
"learning_rate": 0.0001788108874371063,
"loss": 1.0119,
"step": 635
},
{
"epoch": 4.383561643835616,
"grad_norm": 0.33203125,
"learning_rate": 0.00017831783609466504,
"loss": 1.0047,
"step": 640
},
{
"epoch": 4.417808219178082,
"grad_norm": 0.341796875,
"learning_rate": 0.00017781981053340337,
"loss": 1.0143,
"step": 645
},
{
"epoch": 4.4520547945205475,
"grad_norm": 0.345703125,
"learning_rate": 0.00017731684238453385,
"loss": 1.0023,
"step": 650
},
{
"epoch": 4.486301369863014,
"grad_norm": 0.37890625,
"learning_rate": 0.0001768089635931887,
"loss": 1.0125,
"step": 655
},
{
"epoch": 4.52054794520548,
"grad_norm": 0.609375,
"learning_rate": 0.00017629620641639103,
"loss": 1.0074,
"step": 660
},
{
"epoch": 4.554794520547945,
"grad_norm": 0.36328125,
"learning_rate": 0.00017577860342100579,
"loss": 1.0124,
"step": 665
},
{
"epoch": 4.589041095890411,
"grad_norm": 0.65625,
"learning_rate": 0.0001752561874816717,
"loss": 1.015,
"step": 670
},
{
"epoch": 4.623287671232877,
"grad_norm": 0.38671875,
"learning_rate": 0.00017472899177871297,
"loss": 1.0066,
"step": 675
},
{
"epoch": 4.657534246575342,
"grad_norm": 0.32421875,
"learning_rate": 0.00017419704979603214,
"loss": 1.0182,
"step": 680
},
{
"epoch": 4.691780821917808,
"grad_norm": 0.34375,
"learning_rate": 0.00017366039531898326,
"loss": 1.0139,
"step": 685
},
{
"epoch": 4.726027397260274,
"grad_norm": 0.349609375,
"learning_rate": 0.00017311906243222614,
"loss": 1.0162,
"step": 690
},
{
"epoch": 4.760273972602739,
"grad_norm": 0.3359375,
"learning_rate": 0.0001725730855175615,
"loss": 1.019,
"step": 695
},
{
"epoch": 4.794520547945205,
"grad_norm": 0.431640625,
"learning_rate": 0.00017202249925174723,
"loss": 1.0051,
"step": 700
},
{
"epoch": 4.828767123287671,
"grad_norm": 0.4140625,
"learning_rate": 0.00017146733860429612,
"loss": 1.0174,
"step": 705
},
{
"epoch": 4.863013698630137,
"grad_norm": 0.408203125,
"learning_rate": 0.0001709076388352546,
"loss": 1.0065,
"step": 710
},
{
"epoch": 4.897260273972603,
"grad_norm": 0.359375,
"learning_rate": 0.00017034343549296346,
"loss": 1.0262,
"step": 715
},
{
"epoch": 4.931506849315069,
"grad_norm": 0.44140625,
"learning_rate": 0.00016977476441179992,
"loss": 1.0023,
"step": 720
},
{
"epoch": 4.965753424657534,
"grad_norm": 0.357421875,
"learning_rate": 0.0001692016617099018,
"loss": 1.0048,
"step": 725
},
{
"epoch": 5.0,
"grad_norm": 0.431640625,
"learning_rate": 0.0001686241637868734,
"loss": 1.0016,
"step": 730
},
{
"epoch": 5.0,
"eval_loss": 2.5294971466064453,
"eval_runtime": 0.5501,
"eval_samples_per_second": 18.178,
"eval_steps_per_second": 1.818,
"step": 730
},
{
"epoch": 5.034246575342466,
"grad_norm": 0.380859375,
"learning_rate": 0.0001680423073214737,
"loss": 0.9822,
"step": 735
},
{
"epoch": 5.068493150684931,
"grad_norm": 0.369140625,
"learning_rate": 0.00016745612926928694,
"loss": 0.9842,
"step": 740
},
{
"epoch": 5.102739726027397,
"grad_norm": 0.38671875,
"learning_rate": 0.0001668656668603751,
"loss": 0.9717,
"step": 745
},
{
"epoch": 5.136986301369863,
"grad_norm": 0.375,
"learning_rate": 0.00016627095759691362,
"loss": 0.9685,
"step": 750
},
{
"epoch": 5.171232876712328,
"grad_norm": 0.353515625,
"learning_rate": 0.0001656720392508094,
"loss": 0.9744,
"step": 755
},
{
"epoch": 5.205479452054795,
"grad_norm": 0.376953125,
"learning_rate": 0.00016506894986130171,
"loss": 0.9736,
"step": 760
},
{
"epoch": 5.239726027397261,
"grad_norm": 0.486328125,
"learning_rate": 0.00016446172773254629,
"loss": 0.972,
"step": 765
},
{
"epoch": 5.273972602739726,
"grad_norm": 0.470703125,
"learning_rate": 0.00016385041143118255,
"loss": 0.9813,
"step": 770
},
{
"epoch": 5.308219178082192,
"grad_norm": 0.5546875,
"learning_rate": 0.000163235039783884,
"loss": 0.9855,
"step": 775
},
{
"epoch": 5.342465753424658,
"grad_norm": 0.462890625,
"learning_rate": 0.0001626156518748922,
"loss": 0.9765,
"step": 780
},
{
"epoch": 5.376712328767123,
"grad_norm": 0.59375,
"learning_rate": 0.00016199228704353455,
"loss": 0.9876,
"step": 785
},
{
"epoch": 5.410958904109589,
"grad_norm": 0.53125,
"learning_rate": 0.00016136498488172568,
"loss": 0.9772,
"step": 790
},
{
"epoch": 5.445205479452055,
"grad_norm": 0.3984375,
"learning_rate": 0.0001607337852314527,
"loss": 0.9861,
"step": 795
},
{
"epoch": 5.47945205479452,
"grad_norm": 0.3671875,
"learning_rate": 0.00016009872818224485,
"loss": 0.9879,
"step": 800
},
{
"epoch": 5.513698630136986,
"grad_norm": 0.357421875,
"learning_rate": 0.00015945985406862721,
"loss": 0.9821,
"step": 805
},
{
"epoch": 5.5479452054794525,
"grad_norm": 0.4375,
"learning_rate": 0.00015881720346755905,
"loss": 0.9748,
"step": 810
},
{
"epoch": 5.582191780821918,
"grad_norm": 0.376953125,
"learning_rate": 0.00015817081719585643,
"loss": 0.9726,
"step": 815
},
{
"epoch": 5.616438356164384,
"grad_norm": 0.37890625,
"learning_rate": 0.00015752073630759998,
"loss": 0.9918,
"step": 820
},
{
"epoch": 5.6506849315068495,
"grad_norm": 0.419921875,
"learning_rate": 0.00015686700209152738,
"loss": 0.9775,
"step": 825
},
{
"epoch": 5.684931506849315,
"grad_norm": 0.33203125,
"learning_rate": 0.00015620965606841098,
"loss": 0.9734,
"step": 830
},
{
"epoch": 5.719178082191781,
"grad_norm": 0.37890625,
"learning_rate": 0.0001555487399884206,
"loss": 0.9753,
"step": 835
},
{
"epoch": 5.7534246575342465,
"grad_norm": 0.39453125,
"learning_rate": 0.00015488429582847192,
"loss": 0.9701,
"step": 840
},
{
"epoch": 5.787671232876712,
"grad_norm": 0.357421875,
"learning_rate": 0.0001542163657895605,
"loss": 0.9726,
"step": 845
},
{
"epoch": 5.821917808219178,
"grad_norm": 0.4375,
"learning_rate": 0.00015354499229408114,
"loss": 0.9755,
"step": 850
},
{
"epoch": 5.8561643835616435,
"grad_norm": 0.50390625,
"learning_rate": 0.0001528702179831338,
"loss": 0.9733,
"step": 855
},
{
"epoch": 5.890410958904109,
"grad_norm": 0.419921875,
"learning_rate": 0.00015219208571381525,
"loss": 0.9795,
"step": 860
},
{
"epoch": 5.924657534246576,
"grad_norm": 0.466796875,
"learning_rate": 0.00015151063855649698,
"loss": 0.9906,
"step": 865
},
{
"epoch": 5.958904109589041,
"grad_norm": 0.35546875,
"learning_rate": 0.00015082591979208976,
"loss": 0.983,
"step": 870
},
{
"epoch": 5.993150684931507,
"grad_norm": 0.51953125,
"learning_rate": 0.00015013797290929466,
"loss": 0.9823,
"step": 875
},
{
"epoch": 6.0,
"eval_loss": 2.5500409603118896,
"eval_runtime": 0.5455,
"eval_samples_per_second": 18.332,
"eval_steps_per_second": 1.833,
"step": 876
},
{
"epoch": 6.027397260273973,
"grad_norm": 0.380859375,
"learning_rate": 0.00014944684160184108,
"loss": 0.9588,
"step": 880
},
{
"epoch": 6.061643835616438,
"grad_norm": 0.435546875,
"learning_rate": 0.00014875256976571135,
"loss": 0.9449,
"step": 885
},
{
"epoch": 6.095890410958904,
"grad_norm": 0.41796875,
"learning_rate": 0.00014805520149635307,
"loss": 0.9336,
"step": 890
},
{
"epoch": 6.13013698630137,
"grad_norm": 0.388671875,
"learning_rate": 0.00014735478108587828,
"loss": 0.9428,
"step": 895
},
{
"epoch": 6.164383561643835,
"grad_norm": 0.578125,
"learning_rate": 0.00014665135302025035,
"loss": 0.9457,
"step": 900
},
{
"epoch": 6.198630136986301,
"grad_norm": 0.375,
"learning_rate": 0.00014594496197645852,
"loss": 0.9425,
"step": 905
},
{
"epoch": 6.232876712328767,
"grad_norm": 0.361328125,
"learning_rate": 0.0001452356528196804,
"loss": 0.9492,
"step": 910
},
{
"epoch": 6.267123287671233,
"grad_norm": 0.34375,
"learning_rate": 0.00014452347060043237,
"loss": 0.9542,
"step": 915
},
{
"epoch": 6.301369863013699,
"grad_norm": 0.375,
"learning_rate": 0.00014380846055170828,
"loss": 0.9488,
"step": 920
},
{
"epoch": 6.335616438356165,
"grad_norm": 0.56640625,
"learning_rate": 0.00014309066808610655,
"loss": 0.9532,
"step": 925
},
{
"epoch": 6.36986301369863,
"grad_norm": 0.451171875,
"learning_rate": 0.0001423701387929459,
"loss": 0.954,
"step": 930
},
{
"epoch": 6.404109589041096,
"grad_norm": 0.361328125,
"learning_rate": 0.00014164691843536982,
"loss": 0.9513,
"step": 935
},
{
"epoch": 6.438356164383562,
"grad_norm": 0.4375,
"learning_rate": 0.00014092105294744,
"loss": 0.954,
"step": 940
},
{
"epoch": 6.472602739726027,
"grad_norm": 0.404296875,
"learning_rate": 0.00014019258843121893,
"loss": 0.9549,
"step": 945
},
{
"epoch": 6.506849315068493,
"grad_norm": 0.38671875,
"learning_rate": 0.0001394615711538417,
"loss": 0.9509,
"step": 950
},
{
"epoch": 6.541095890410959,
"grad_norm": 0.376953125,
"learning_rate": 0.00013872804754457759,
"loss": 0.9556,
"step": 955
},
{
"epoch": 6.575342465753424,
"grad_norm": 0.400390625,
"learning_rate": 0.00013799206419188103,
"loss": 0.9596,
"step": 960
},
{
"epoch": 6.609589041095891,
"grad_norm": 0.37890625,
"learning_rate": 0.00013725366784043288,
"loss": 0.9532,
"step": 965
},
{
"epoch": 6.6438356164383565,
"grad_norm": 0.361328125,
"learning_rate": 0.00013651290538817113,
"loss": 0.9547,
"step": 970
},
{
"epoch": 6.678082191780822,
"grad_norm": 0.392578125,
"learning_rate": 0.0001357698238833126,
"loss": 0.9619,
"step": 975
},
{
"epoch": 6.712328767123288,
"grad_norm": 0.38671875,
"learning_rate": 0.00013502447052136455,
"loss": 0.9457,
"step": 980
},
{
"epoch": 6.7465753424657535,
"grad_norm": 0.384765625,
"learning_rate": 0.00013427689264212738,
"loss": 0.9595,
"step": 985
},
{
"epoch": 6.780821917808219,
"grad_norm": 0.3984375,
"learning_rate": 0.00013352713772668765,
"loss": 0.9501,
"step": 990
},
{
"epoch": 6.815068493150685,
"grad_norm": 0.404296875,
"learning_rate": 0.0001327752533944025,
"loss": 0.9542,
"step": 995
},
{
"epoch": 6.8493150684931505,
"grad_norm": 0.5546875,
"learning_rate": 0.00013202128739987532,
"loss": 0.957,
"step": 1000
},
{
"epoch": 6.883561643835616,
"grad_norm": 0.388671875,
"learning_rate": 0.00013126528762992247,
"loss": 0.9597,
"step": 1005
},
{
"epoch": 6.917808219178082,
"grad_norm": 0.4140625,
"learning_rate": 0.0001305073021005321,
"loss": 0.9525,
"step": 1010
},
{
"epoch": 6.9520547945205475,
"grad_norm": 0.400390625,
"learning_rate": 0.0001297473789538142,
"loss": 0.9554,
"step": 1015
},
{
"epoch": 6.986301369863014,
"grad_norm": 0.37890625,
"learning_rate": 0.00012898556645494325,
"loss": 0.955,
"step": 1020
},
{
"epoch": 7.0,
"eval_loss": 2.5866098403930664,
"eval_runtime": 0.5603,
"eval_samples_per_second": 17.847,
"eval_steps_per_second": 1.785,
"step": 1022
},
{
"epoch": 7.02054794520548,
"grad_norm": 0.380859375,
"learning_rate": 0.0001282219129890925,
"loss": 0.9357,
"step": 1025
},
{
"epoch": 7.054794520547945,
"grad_norm": 0.373046875,
"learning_rate": 0.00012745646705836097,
"loss": 0.9228,
"step": 1030
},
{
"epoch": 7.089041095890411,
"grad_norm": 0.5390625,
"learning_rate": 0.0001266892772786929,
"loss": 0.9121,
"step": 1035
},
{
"epoch": 7.123287671232877,
"grad_norm": 0.37109375,
"learning_rate": 0.0001259203923767901,
"loss": 0.9181,
"step": 1040
},
{
"epoch": 7.157534246575342,
"grad_norm": 0.37109375,
"learning_rate": 0.00012514986118701695,
"loss": 0.9176,
"step": 1045
},
{
"epoch": 7.191780821917808,
"grad_norm": 0.3984375,
"learning_rate": 0.00012437773264829897,
"loss": 0.9241,
"step": 1050
},
{
"epoch": 7.226027397260274,
"grad_norm": 0.376953125,
"learning_rate": 0.00012360405580101448,
"loss": 0.9287,
"step": 1055
},
{
"epoch": 7.260273972602739,
"grad_norm": 0.375,
"learning_rate": 0.00012282887978387976,
"loss": 0.9347,
"step": 1060
},
{
"epoch": 7.294520547945205,
"grad_norm": 0.3984375,
"learning_rate": 0.00012205225383082843,
"loss": 0.9275,
"step": 1065
},
{
"epoch": 7.328767123287671,
"grad_norm": 0.404296875,
"learning_rate": 0.000121274227267884,
"loss": 0.923,
"step": 1070
},
{
"epoch": 7.363013698630137,
"grad_norm": 0.388671875,
"learning_rate": 0.00012049484951002739,
"loss": 0.9332,
"step": 1075
},
{
"epoch": 7.397260273972603,
"grad_norm": 0.37890625,
"learning_rate": 0.00011971417005805818,
"loss": 0.9238,
"step": 1080
},
{
"epoch": 7.431506849315069,
"grad_norm": 0.37109375,
"learning_rate": 0.00011893223849545084,
"loss": 0.9278,
"step": 1085
},
{
"epoch": 7.465753424657534,
"grad_norm": 0.388671875,
"learning_rate": 0.00011814910448520536,
"loss": 0.9268,
"step": 1090
},
{
"epoch": 7.5,
"grad_norm": 0.404296875,
"learning_rate": 0.00011736481776669306,
"loss": 0.931,
"step": 1095
},
{
"epoch": 7.534246575342466,
"grad_norm": 0.390625,
"learning_rate": 0.00011657942815249754,
"loss": 0.9283,
"step": 1100
},
{
"epoch": 7.568493150684931,
"grad_norm": 0.369140625,
"learning_rate": 0.00011579298552525084,
"loss": 0.9246,
"step": 1105
},
{
"epoch": 7.602739726027397,
"grad_norm": 0.390625,
"learning_rate": 0.00011500553983446527,
"loss": 0.9293,
"step": 1110
},
{
"epoch": 7.636986301369863,
"grad_norm": 0.365234375,
"learning_rate": 0.00011421714109336097,
"loss": 0.9335,
"step": 1115
},
{
"epoch": 7.671232876712329,
"grad_norm": 0.453125,
"learning_rate": 0.00011342783937568926,
"loss": 0.9359,
"step": 1120
},
{
"epoch": 7.705479452054795,
"grad_norm": 0.416015625,
"learning_rate": 0.00011263768481255264,
"loss": 0.9295,
"step": 1125
},
{
"epoch": 7.739726027397261,
"grad_norm": 0.380859375,
"learning_rate": 0.00011184672758922034,
"loss": 0.9404,
"step": 1130
},
{
"epoch": 7.773972602739726,
"grad_norm": 0.396484375,
"learning_rate": 0.00011105501794194131,
"loss": 0.9289,
"step": 1135
},
{
"epoch": 7.808219178082192,
"grad_norm": 0.39453125,
"learning_rate": 0.00011026260615475333,
"loss": 0.9409,
"step": 1140
},
{
"epoch": 7.842465753424658,
"grad_norm": 0.396484375,
"learning_rate": 0.00010946954255628928,
"loss": 0.9355,
"step": 1145
},
{
"epoch": 7.876712328767123,
"grad_norm": 0.443359375,
"learning_rate": 0.00010867587751658079,
"loss": 0.9257,
"step": 1150
},
{
"epoch": 7.910958904109589,
"grad_norm": 0.365234375,
"learning_rate": 0.00010788166144385888,
"loss": 0.924,
"step": 1155
},
{
"epoch": 7.945205479452055,
"grad_norm": 0.427734375,
"learning_rate": 0.0001070869447813525,
"loss": 0.9202,
"step": 1160
},
{
"epoch": 7.97945205479452,
"grad_norm": 0.3515625,
"learning_rate": 0.0001062917780040847,
"loss": 0.9214,
"step": 1165
},
{
"epoch": 8.0,
"eval_loss": 2.6224260330200195,
"eval_runtime": 0.5566,
"eval_samples_per_second": 17.965,
"eval_steps_per_second": 1.797,
"step": 1168
},
{
"epoch": 8.013698630136986,
"grad_norm": 0.388671875,
"learning_rate": 0.0001054962116156667,
"loss": 0.9133,
"step": 1170
},
{
"epoch": 8.047945205479452,
"grad_norm": 0.41796875,
"learning_rate": 0.00010470029614509041,
"loss": 0.8952,
"step": 1175
},
{
"epoch": 8.082191780821917,
"grad_norm": 0.3984375,
"learning_rate": 0.00010390408214351892,
"loss": 0.8963,
"step": 1180
},
{
"epoch": 8.116438356164384,
"grad_norm": 0.388671875,
"learning_rate": 0.0001031076201810762,
"loss": 0.8996,
"step": 1185
},
{
"epoch": 8.150684931506849,
"grad_norm": 0.38671875,
"learning_rate": 0.00010231096084363483,
"loss": 0.8898,
"step": 1190
},
{
"epoch": 8.184931506849315,
"grad_norm": 0.390625,
"learning_rate": 0.00010151415472960342,
"loss": 0.9138,
"step": 1195
},
{
"epoch": 8.219178082191782,
"grad_norm": 0.388671875,
"learning_rate": 0.00010071725244671282,
"loss": 0.9023,
"step": 1200
},
{
"epoch": 8.253424657534246,
"grad_norm": 0.388671875,
"learning_rate": 9.992030460880181e-05,
"loss": 0.8929,
"step": 1205
},
{
"epoch": 8.287671232876713,
"grad_norm": 0.392578125,
"learning_rate": 9.91233618326026e-05,
"loss": 0.9089,
"step": 1210
},
{
"epoch": 8.321917808219178,
"grad_norm": 0.41015625,
"learning_rate": 9.83264747345259e-05,
"loss": 0.9037,
"step": 1215
},
{
"epoch": 8.356164383561644,
"grad_norm": 0.369140625,
"learning_rate": 9.752969392744606e-05,
"loss": 0.9062,
"step": 1220
},
{
"epoch": 8.39041095890411,
"grad_norm": 0.40234375,
"learning_rate": 9.673307001748661e-05,
"loss": 0.8982,
"step": 1225
},
{
"epoch": 8.424657534246576,
"grad_norm": 0.392578125,
"learning_rate": 9.593665360080599e-05,
"loss": 0.8994,
"step": 1230
},
{
"epoch": 8.45890410958904,
"grad_norm": 0.4140625,
"learning_rate": 9.514049526038418e-05,
"loss": 0.9045,
"step": 1235
},
{
"epoch": 8.493150684931507,
"grad_norm": 0.400390625,
"learning_rate": 9.43446455628097e-05,
"loss": 0.9062,
"step": 1240
},
{
"epoch": 8.527397260273972,
"grad_norm": 0.427734375,
"learning_rate": 9.354915505506839e-05,
"loss": 0.9056,
"step": 1245
},
{
"epoch": 8.561643835616438,
"grad_norm": 0.3828125,
"learning_rate": 9.27540742613326e-05,
"loss": 0.9078,
"step": 1250
},
{
"epoch": 8.595890410958905,
"grad_norm": 0.431640625,
"learning_rate": 9.195945367975256e-05,
"loss": 0.8994,
"step": 1255
},
{
"epoch": 8.63013698630137,
"grad_norm": 0.404296875,
"learning_rate": 9.116534377924883e-05,
"loss": 0.9088,
"step": 1260
},
{
"epoch": 8.664383561643836,
"grad_norm": 0.44921875,
"learning_rate": 9.037179499630703e-05,
"loss": 0.9035,
"step": 1265
},
{
"epoch": 8.698630136986301,
"grad_norm": 0.40625,
"learning_rate": 8.957885773177438e-05,
"loss": 0.9178,
"step": 1270
},
{
"epoch": 8.732876712328768,
"grad_norm": 0.51953125,
"learning_rate": 8.878658234765858e-05,
"loss": 0.9062,
"step": 1275
},
{
"epoch": 8.767123287671232,
"grad_norm": 0.486328125,
"learning_rate": 8.799501916392912e-05,
"loss": 0.9157,
"step": 1280
},
{
"epoch": 8.801369863013699,
"grad_norm": 0.392578125,
"learning_rate": 8.720421845532151e-05,
"loss": 0.912,
"step": 1285
},
{
"epoch": 8.835616438356164,
"grad_norm": 0.37109375,
"learning_rate": 8.641423044814374e-05,
"loss": 0.9085,
"step": 1290
},
{
"epoch": 8.86986301369863,
"grad_norm": 0.396484375,
"learning_rate": 8.562510531708677e-05,
"loss": 0.9158,
"step": 1295
},
{
"epoch": 8.904109589041095,
"grad_norm": 0.384765625,
"learning_rate": 8.48368931820373e-05,
"loss": 0.909,
"step": 1300
},
{
"epoch": 8.938356164383562,
"grad_norm": 0.39453125,
"learning_rate": 8.404964410489485e-05,
"loss": 0.9121,
"step": 1305
},
{
"epoch": 8.972602739726028,
"grad_norm": 0.39453125,
"learning_rate": 8.32634080863919e-05,
"loss": 0.913,
"step": 1310
},
{
"epoch": 9.0,
"eval_loss": 2.6512458324432373,
"eval_runtime": 0.5534,
"eval_samples_per_second": 18.07,
"eval_steps_per_second": 1.807,
"step": 1314
},
{
"epoch": 9.006849315068493,
"grad_norm": 0.408203125,
"learning_rate": 8.247823506291844e-05,
"loss": 0.9034,
"step": 1315
},
{
"epoch": 9.04109589041096,
"grad_norm": 0.404296875,
"learning_rate": 8.169417490335007e-05,
"loss": 0.8821,
"step": 1320
},
{
"epoch": 9.075342465753424,
"grad_norm": 0.416015625,
"learning_rate": 8.091127740588094e-05,
"loss": 0.8702,
"step": 1325
},
{
"epoch": 9.10958904109589,
"grad_norm": 0.39453125,
"learning_rate": 8.012959229486061e-05,
"loss": 0.8755,
"step": 1330
},
{
"epoch": 9.143835616438356,
"grad_norm": 0.43359375,
"learning_rate": 7.934916921763628e-05,
"loss": 0.8783,
"step": 1335
},
{
"epoch": 9.178082191780822,
"grad_norm": 0.421875,
"learning_rate": 7.857005774139907e-05,
"loss": 0.8794,
"step": 1340
},
{
"epoch": 9.212328767123287,
"grad_norm": 0.400390625,
"learning_rate": 7.779230735003628e-05,
"loss": 0.8844,
"step": 1345
},
{
"epoch": 9.246575342465754,
"grad_norm": 0.3984375,
"learning_rate": 7.701596744098818e-05,
"loss": 0.8775,
"step": 1350
},
{
"epoch": 9.280821917808218,
"grad_norm": 0.404296875,
"learning_rate": 7.624108732211081e-05,
"loss": 0.8705,
"step": 1355
},
{
"epoch": 9.315068493150685,
"grad_norm": 0.408203125,
"learning_rate": 7.54677162085442e-05,
"loss": 0.8897,
"step": 1360
},
{
"epoch": 9.349315068493151,
"grad_norm": 0.3984375,
"learning_rate": 7.469590321958662e-05,
"loss": 0.882,
"step": 1365
},
{
"epoch": 9.383561643835616,
"grad_norm": 0.43359375,
"learning_rate": 7.392569737557474e-05,
"loss": 0.8879,
"step": 1370
},
{
"epoch": 9.417808219178083,
"grad_norm": 0.416015625,
"learning_rate": 7.31571475947703e-05,
"loss": 0.8827,
"step": 1375
},
{
"epoch": 9.452054794520548,
"grad_norm": 0.412109375,
"learning_rate": 7.239030269025311e-05,
"loss": 0.8805,
"step": 1380
},
{
"epoch": 9.486301369863014,
"grad_norm": 0.408203125,
"learning_rate": 7.162521136682085e-05,
"loss": 0.8966,
"step": 1385
},
{
"epoch": 9.520547945205479,
"grad_norm": 0.431640625,
"learning_rate": 7.08619222178954e-05,
"loss": 0.8895,
"step": 1390
},
{
"epoch": 9.554794520547945,
"grad_norm": 0.423828125,
"learning_rate": 7.010048372243698e-05,
"loss": 0.8907,
"step": 1395
},
{
"epoch": 9.58904109589041,
"grad_norm": 0.42578125,
"learning_rate": 6.934094424186459e-05,
"loss": 0.8876,
"step": 1400
},
{
"epoch": 9.623287671232877,
"grad_norm": 0.39453125,
"learning_rate": 6.858335201698485e-05,
"loss": 0.8936,
"step": 1405
},
{
"epoch": 9.657534246575342,
"grad_norm": 0.451171875,
"learning_rate": 6.782775516492771e-05,
"loss": 0.8804,
"step": 1410
},
{
"epoch": 9.691780821917808,
"grad_norm": 0.40234375,
"learning_rate": 6.70742016760907e-05,
"loss": 0.8907,
"step": 1415
},
{
"epoch": 9.726027397260275,
"grad_norm": 0.4453125,
"learning_rate": 6.632273941109064e-05,
"loss": 0.8756,
"step": 1420
},
{
"epoch": 9.76027397260274,
"grad_norm": 0.40625,
"learning_rate": 6.5573416097724e-05,
"loss": 0.8963,
"step": 1425
},
{
"epoch": 9.794520547945206,
"grad_norm": 0.412109375,
"learning_rate": 6.482627932793553e-05,
"loss": 0.8998,
"step": 1430
},
{
"epoch": 9.82876712328767,
"grad_norm": 0.419921875,
"learning_rate": 6.408137655479554e-05,
"loss": 0.9024,
"step": 1435
},
{
"epoch": 9.863013698630137,
"grad_norm": 0.421875,
"learning_rate": 6.333875508948593e-05,
"loss": 0.8921,
"step": 1440
},
{
"epoch": 9.897260273972602,
"grad_norm": 0.45703125,
"learning_rate": 6.259846209829551e-05,
"loss": 0.904,
"step": 1445
},
{
"epoch": 9.931506849315069,
"grad_norm": 0.4140625,
"learning_rate": 6.186054459962399e-05,
"loss": 0.8899,
"step": 1450
},
{
"epoch": 9.965753424657533,
"grad_norm": 0.40625,
"learning_rate": 6.112504946099604e-05,
"loss": 0.8875,
"step": 1455
},
{
"epoch": 10.0,
"grad_norm": 0.431640625,
"learning_rate": 6.039202339608432e-05,
"loss": 0.889,
"step": 1460
},
{
"epoch": 10.0,
"eval_loss": 2.6852145195007324,
"eval_runtime": 0.5511,
"eval_samples_per_second": 18.146,
"eval_steps_per_second": 1.815,
"step": 1460
},
{
"epoch": 10.034246575342467,
"grad_norm": 0.40625,
"learning_rate": 5.966151296174268e-05,
"loss": 0.8664,
"step": 1465
},
{
"epoch": 10.068493150684931,
"grad_norm": 0.431640625,
"learning_rate": 5.8933564555049105e-05,
"loss": 0.8677,
"step": 1470
},
{
"epoch": 10.102739726027398,
"grad_norm": 0.41796875,
"learning_rate": 5.820822441035899e-05,
"loss": 0.866,
"step": 1475
},
{
"epoch": 10.136986301369863,
"grad_norm": 0.40625,
"learning_rate": 5.7485538596368496e-05,
"loss": 0.8664,
"step": 1480
},
{
"epoch": 10.17123287671233,
"grad_norm": 0.41015625,
"learning_rate": 5.6765553013188766e-05,
"loss": 0.8645,
"step": 1485
},
{
"epoch": 10.205479452054794,
"grad_norm": 0.400390625,
"learning_rate": 5.6048313389430484e-05,
"loss": 0.8624,
"step": 1490
},
{
"epoch": 10.23972602739726,
"grad_norm": 0.408203125,
"learning_rate": 5.533386527929962e-05,
"loss": 0.874,
"step": 1495
},
{
"epoch": 10.273972602739725,
"grad_norm": 0.40625,
"learning_rate": 5.462225405970401e-05,
"loss": 0.8708,
"step": 1500
},
{
"epoch": 10.273972602739725,
"step": 1500,
"total_flos": 8.853977907740017e+17,
"train_loss": 0.0,
"train_runtime": 2.8738,
"train_samples_per_second": 24365.255,
"train_steps_per_second": 508.044
}
],
"logging_steps": 5,
"max_steps": 1460,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.853977907740017e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}