zephyr-34b-sft-full_epoch1 / trainer_state.json
Commit 5d78064 ("Model save")
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9997563946406821,
"eval_steps": 500,
"global_step": 1026,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.941747572815534e-07,
"loss": 1.527,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 9.70873786407767e-07,
"loss": 1.5166,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 1.941747572815534e-06,
"loss": 1.3897,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 2.912621359223301e-06,
"loss": 1.1756,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 3.883495145631068e-06,
"loss": 1.1025,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 4.854368932038836e-06,
"loss": 1.0531,
"step": 25
},
{
"epoch": 0.03,
"learning_rate": 5.825242718446602e-06,
"loss": 1.0663,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 6.79611650485437e-06,
"loss": 1.0582,
"step": 35
},
{
"epoch": 0.04,
"learning_rate": 7.766990291262136e-06,
"loss": 1.0537,
"step": 40
},
{
"epoch": 0.04,
"learning_rate": 8.737864077669904e-06,
"loss": 1.042,
"step": 45
},
{
"epoch": 0.05,
"learning_rate": 9.708737864077671e-06,
"loss": 1.0136,
"step": 50
},
{
"epoch": 0.05,
"learning_rate": 1.0679611650485437e-05,
"loss": 1.0165,
"step": 55
},
{
"epoch": 0.06,
"learning_rate": 1.1650485436893204e-05,
"loss": 1.0045,
"step": 60
},
{
"epoch": 0.06,
"learning_rate": 1.2621359223300974e-05,
"loss": 1.0122,
"step": 65
},
{
"epoch": 0.07,
"learning_rate": 1.359223300970874e-05,
"loss": 1.0107,
"step": 70
},
{
"epoch": 0.07,
"learning_rate": 1.4563106796116507e-05,
"loss": 1.01,
"step": 75
},
{
"epoch": 0.08,
"learning_rate": 1.5533980582524273e-05,
"loss": 1.0034,
"step": 80
},
{
"epoch": 0.08,
"learning_rate": 1.650485436893204e-05,
"loss": 0.9995,
"step": 85
},
{
"epoch": 0.09,
"learning_rate": 1.7475728155339808e-05,
"loss": 0.994,
"step": 90
},
{
"epoch": 0.09,
"learning_rate": 1.8446601941747575e-05,
"loss": 1.015,
"step": 95
},
{
"epoch": 0.1,
"learning_rate": 1.9417475728155343e-05,
"loss": 0.9897,
"step": 100
},
{
"epoch": 0.1,
"learning_rate": 1.999976830073192e-05,
"loss": 1.0033,
"step": 105
},
{
"epoch": 0.11,
"learning_rate": 1.99971618072711e-05,
"loss": 1.0027,
"step": 110
},
{
"epoch": 0.11,
"learning_rate": 1.9991659953668803e-05,
"loss": 0.9943,
"step": 115
},
{
"epoch": 0.12,
"learning_rate": 1.998326433336204e-05,
"loss": 1.0269,
"step": 120
},
{
"epoch": 0.12,
"learning_rate": 1.997197737787537e-05,
"loss": 0.9755,
"step": 125
},
{
"epoch": 0.13,
"learning_rate": 1.9957802356116665e-05,
"loss": 1.0,
"step": 130
},
{
"epoch": 0.13,
"learning_rate": 1.994074337343041e-05,
"loss": 0.9923,
"step": 135
},
{
"epoch": 0.14,
"learning_rate": 1.9920805370408695e-05,
"loss": 1.0044,
"step": 140
},
{
"epoch": 0.14,
"learning_rate": 1.9897994121460322e-05,
"loss": 0.9956,
"step": 145
},
{
"epoch": 0.15,
"learning_rate": 1.9872316233138463e-05,
"loss": 1.0046,
"step": 150
},
{
"epoch": 0.15,
"learning_rate": 1.9843779142227258e-05,
"loss": 0.9857,
"step": 155
},
{
"epoch": 0.16,
"learning_rate": 1.981239111358799e-05,
"loss": 1.0008,
"step": 160
},
{
"epoch": 0.16,
"learning_rate": 1.9778161237765438e-05,
"loss": 1.0068,
"step": 165
},
{
"epoch": 0.17,
"learning_rate": 1.9741099428355075e-05,
"loss": 1.0073,
"step": 170
},
{
"epoch": 0.17,
"learning_rate": 1.9701216419131934e-05,
"loss": 0.9601,
"step": 175
},
{
"epoch": 0.18,
"learning_rate": 1.9658523760941882e-05,
"loss": 1.0001,
"step": 180
},
{
"epoch": 0.18,
"learning_rate": 1.9613033818356322e-05,
"loss": 1.0086,
"step": 185
},
{
"epoch": 0.19,
"learning_rate": 1.956475976609114e-05,
"loss": 1.0117,
"step": 190
},
{
"epoch": 0.19,
"learning_rate": 1.951371558519111e-05,
"loss": 0.9955,
"step": 195
},
{
"epoch": 0.19,
"learning_rate": 1.9459916058980707e-05,
"loss": 1.0014,
"step": 200
},
{
"epoch": 0.2,
"learning_rate": 1.9403376768782592e-05,
"loss": 0.9803,
"step": 205
},
{
"epoch": 0.2,
"learning_rate": 1.9344114089404983e-05,
"loss": 0.9847,
"step": 210
},
{
"epoch": 0.21,
"learning_rate": 1.9282145184399197e-05,
"loss": 0.9946,
"step": 215
},
{
"epoch": 0.21,
"learning_rate": 1.9217488001088784e-05,
"loss": 0.9886,
"step": 220
},
{
"epoch": 0.22,
"learning_rate": 1.9150161265371663e-05,
"loss": 0.981,
"step": 225
},
{
"epoch": 0.22,
"learning_rate": 1.908018447629674e-05,
"loss": 0.9863,
"step": 230
},
{
"epoch": 0.23,
"learning_rate": 1.9007577900416648e-05,
"loss": 0.9907,
"step": 235
},
{
"epoch": 0.23,
"learning_rate": 1.8932362565918167e-05,
"loss": 0.9721,
"step": 240
},
{
"epoch": 0.24,
"learning_rate": 1.8854560256532098e-05,
"loss": 1.0003,
"step": 245
},
{
"epoch": 0.24,
"learning_rate": 1.877419350522429e-05,
"loss": 0.9749,
"step": 250
},
{
"epoch": 0.25,
"learning_rate": 1.869128558766965e-05,
"loss": 0.9967,
"step": 255
},
{
"epoch": 0.25,
"learning_rate": 1.8605860515511158e-05,
"loss": 0.9864,
"step": 260
},
{
"epoch": 0.26,
"learning_rate": 1.8517943029405577e-05,
"loss": 1.0096,
"step": 265
},
{
"epoch": 0.26,
"learning_rate": 1.8427558591858164e-05,
"loss": 0.9643,
"step": 270
},
{
"epoch": 0.27,
"learning_rate": 1.833473337984823e-05,
"loss": 1.001,
"step": 275
},
{
"epoch": 0.27,
"learning_rate": 1.823949427724785e-05,
"loss": 0.996,
"step": 280
},
{
"epoch": 0.28,
"learning_rate": 1.8141868867035745e-05,
"loss": 0.9885,
"step": 285
},
{
"epoch": 0.28,
"learning_rate": 1.8041885423308808e-05,
"loss": 0.9818,
"step": 290
},
{
"epoch": 0.29,
"learning_rate": 1.7939572903093383e-05,
"loss": 0.9936,
"step": 295
},
{
"epoch": 0.29,
"learning_rate": 1.7834960937958775e-05,
"loss": 0.9733,
"step": 300
},
{
"epoch": 0.3,
"learning_rate": 1.7728079825435426e-05,
"loss": 1.0035,
"step": 305
},
{
"epoch": 0.3,
"learning_rate": 1.761896052024019e-05,
"loss": 1.0073,
"step": 310
},
{
"epoch": 0.31,
"learning_rate": 1.7507634625311283e-05,
"loss": 0.9821,
"step": 315
},
{
"epoch": 0.31,
"learning_rate": 1.7394134382655496e-05,
"loss": 0.99,
"step": 320
},
{
"epoch": 0.32,
"learning_rate": 1.7278492664010326e-05,
"loss": 0.9766,
"step": 325
},
{
"epoch": 0.32,
"learning_rate": 1.7160742961323714e-05,
"loss": 0.9783,
"step": 330
},
{
"epoch": 0.33,
"learning_rate": 1.7040919377054182e-05,
"loss": 0.9922,
"step": 335
},
{
"epoch": 0.33,
"learning_rate": 1.6919056614294133e-05,
"loss": 0.9767,
"step": 340
},
{
"epoch": 0.34,
"learning_rate": 1.6795189966719182e-05,
"loss": 0.9937,
"step": 345
},
{
"epoch": 0.34,
"learning_rate": 1.666935530836651e-05,
"loss": 0.9702,
"step": 350
},
{
"epoch": 0.35,
"learning_rate": 1.654158908324504e-05,
"loss": 0.9906,
"step": 355
},
{
"epoch": 0.35,
"learning_rate": 1.6411928294780626e-05,
"loss": 0.9662,
"step": 360
},
{
"epoch": 0.36,
"learning_rate": 1.6280410495099165e-05,
"loss": 0.9802,
"step": 365
},
{
"epoch": 0.36,
"learning_rate": 1.6147073774150834e-05,
"loss": 0.9897,
"step": 370
},
{
"epoch": 0.37,
"learning_rate": 1.601195674867853e-05,
"loss": 0.9696,
"step": 375
},
{
"epoch": 0.37,
"learning_rate": 1.5875098551033765e-05,
"loss": 0.9859,
"step": 380
},
{
"epoch": 0.38,
"learning_rate": 1.5736538817843228e-05,
"loss": 0.9751,
"step": 385
},
{
"epoch": 0.38,
"learning_rate": 1.559631767852929e-05,
"loss": 0.9815,
"step": 390
},
{
"epoch": 0.38,
"learning_rate": 1.54544757436878e-05,
"loss": 0.9913,
"step": 395
},
{
"epoch": 0.39,
"learning_rate": 1.5311054093326508e-05,
"loss": 0.9652,
"step": 400
},
{
"epoch": 0.39,
"learning_rate": 1.5166094264967553e-05,
"loss": 0.9831,
"step": 405
},
{
"epoch": 0.4,
"learning_rate": 1.5019638241617429e-05,
"loss": 0.9598,
"step": 410
},
{
"epoch": 0.4,
"learning_rate": 1.4871728439607967e-05,
"loss": 0.9667,
"step": 415
},
{
"epoch": 0.41,
"learning_rate": 1.472240769631176e-05,
"loss": 0.9764,
"step": 420
},
{
"epoch": 0.41,
"learning_rate": 1.4571719257735702e-05,
"loss": 0.9685,
"step": 425
},
{
"epoch": 0.42,
"learning_rate": 1.4419706765996153e-05,
"loss": 0.9872,
"step": 430
},
{
"epoch": 0.42,
"learning_rate": 1.4266414246679379e-05,
"loss": 0.9727,
"step": 435
},
{
"epoch": 0.43,
"learning_rate": 1.4111886096090953e-05,
"loss": 0.9532,
"step": 440
},
{
"epoch": 0.43,
"learning_rate": 1.3956167068397756e-05,
"loss": 0.9679,
"step": 445
},
{
"epoch": 0.44,
"learning_rate": 1.3799302262666388e-05,
"loss": 0.9779,
"step": 450
},
{
"epoch": 0.44,
"learning_rate": 1.364133710980162e-05,
"loss": 0.9906,
"step": 455
},
{
"epoch": 0.45,
"learning_rate": 1.3482317359388806e-05,
"loss": 0.9756,
"step": 460
},
{
"epoch": 0.45,
"learning_rate": 1.3322289066443947e-05,
"loss": 0.9799,
"step": 465
},
{
"epoch": 0.46,
"learning_rate": 1.316129857807534e-05,
"loss": 0.9677,
"step": 470
},
{
"epoch": 0.46,
"learning_rate": 1.2999392520060594e-05,
"loss": 0.9738,
"step": 475
},
{
"epoch": 0.47,
"learning_rate": 1.2836617783342968e-05,
"loss": 0.9939,
"step": 480
},
{
"epoch": 0.47,
"learning_rate": 1.2673021510450893e-05,
"loss": 0.9618,
"step": 485
},
{
"epoch": 0.48,
"learning_rate": 1.250865108184464e-05,
"loss": 0.9652,
"step": 490
},
{
"epoch": 0.48,
"learning_rate": 1.2343554102194073e-05,
"loss": 0.9915,
"step": 495
},
{
"epoch": 0.49,
"learning_rate": 1.2177778386591475e-05,
"loss": 0.9782,
"step": 500
},
{
"epoch": 0.49,
"learning_rate": 1.2011371946703416e-05,
"loss": 0.9918,
"step": 505
},
{
"epoch": 0.5,
"learning_rate": 1.1844382976865714e-05,
"loss": 0.9813,
"step": 510
},
{
"epoch": 0.5,
"learning_rate": 1.1676859840125468e-05,
"loss": 0.9877,
"step": 515
},
{
"epoch": 0.51,
"learning_rate": 1.1508851054234236e-05,
"loss": 0.9828,
"step": 520
},
{
"epoch": 0.51,
"learning_rate": 1.1340405277596427e-05,
"loss": 0.9661,
"step": 525
},
{
"epoch": 0.52,
"learning_rate": 1.1171571295176915e-05,
"loss": 0.9682,
"step": 530
},
{
"epoch": 0.52,
"learning_rate": 1.1002398004372048e-05,
"loss": 0.9666,
"step": 535
},
{
"epoch": 0.53,
"learning_rate": 1.0832934400848063e-05,
"loss": 0.9526,
"step": 540
},
{
"epoch": 0.53,
"learning_rate": 1.066322956435104e-05,
"loss": 0.9546,
"step": 545
},
{
"epoch": 0.54,
"learning_rate": 1.0493332644492534e-05,
"loss": 0.9561,
"step": 550
},
{
"epoch": 0.54,
"learning_rate": 1.0323292846514927e-05,
"loss": 0.9602,
"step": 555
},
{
"epoch": 0.55,
"learning_rate": 1.015315941704071e-05,
"loss": 0.9579,
"step": 560
},
{
"epoch": 0.55,
"learning_rate": 9.982981629809776e-06,
"loss": 0.959,
"step": 565
},
{
"epoch": 0.56,
"learning_rate": 9.812808771408804e-06,
"loss": 0.9512,
"step": 570
},
{
"epoch": 0.56,
"learning_rate": 9.64269012699702e-06,
"loss": 0.9554,
"step": 575
},
{
"epoch": 0.57,
"learning_rate": 9.472674966032276e-06,
"loss": 0.9724,
"step": 580
},
{
"epoch": 0.57,
"learning_rate": 9.302812528001741e-06,
"loss": 0.9691,
"step": 585
},
{
"epoch": 0.57,
"learning_rate": 9.133152008161235e-06,
"loss": 0.9676,
"step": 590
},
{
"epoch": 0.58,
"learning_rate": 8.963742543287397e-06,
"loss": 0.9603,
"step": 595
},
{
"epoch": 0.58,
"learning_rate": 8.79463319744677e-06,
"loss": 0.9526,
"step": 600
},
{
"epoch": 0.59,
"learning_rate": 8.625872947785968e-06,
"loss": 0.9561,
"step": 605
},
{
"epoch": 0.59,
"learning_rate": 8.457510670346976e-06,
"loss": 0.9543,
"step": 610
},
{
"epoch": 0.6,
"learning_rate": 8.2895951259118e-06,
"loss": 0.9663,
"step": 615
},
{
"epoch": 0.6,
"learning_rate": 8.122174945880409e-06,
"loss": 0.9763,
"step": 620
},
{
"epoch": 0.61,
"learning_rate": 7.955298618186227e-06,
"loss": 0.9621,
"step": 625
},
{
"epoch": 0.61,
"learning_rate": 7.78901447325314e-06,
"loss": 0.9487,
"step": 630
},
{
"epoch": 0.62,
"learning_rate": 7.623370669998115e-06,
"loss": 0.9383,
"step": 635
},
{
"epoch": 0.62,
"learning_rate": 7.458415181883506e-06,
"loss": 0.9694,
"step": 640
},
{
"epoch": 0.63,
"learning_rate": 7.294195783023072e-06,
"loss": 0.9491,
"step": 645
},
{
"epoch": 0.63,
"learning_rate": 7.130760034345708e-06,
"loss": 0.963,
"step": 650
},
{
"epoch": 0.64,
"learning_rate": 6.968155269820951e-06,
"loss": 0.9512,
"step": 655
},
{
"epoch": 0.64,
"learning_rate": 6.806428582750191e-06,
"loss": 0.9609,
"step": 660
},
{
"epoch": 0.65,
"learning_rate": 6.645626812127588e-06,
"loss": 0.9574,
"step": 665
},
{
"epoch": 0.65,
"learning_rate": 6.485796529074662e-06,
"loss": 0.9631,
"step": 670
},
{
"epoch": 0.66,
"learning_rate": 6.326984023352435e-06,
"loss": 0.9627,
"step": 675
},
{
"epoch": 0.66,
"learning_rate": 6.169235289955073e-06,
"loss": 0.9568,
"step": 680
},
{
"epoch": 0.67,
"learning_rate": 6.012596015788903e-06,
"loss": 0.9528,
"step": 685
},
{
"epoch": 0.67,
"learning_rate": 5.8571115664406655e-06,
"loss": 0.9639,
"step": 690
},
{
"epoch": 0.68,
"learning_rate": 5.702826973038776e-06,
"loss": 0.9609,
"step": 695
},
{
"epoch": 0.68,
"learning_rate": 5.549786919211532e-06,
"loss": 0.9379,
"step": 700
},
{
"epoch": 0.69,
"learning_rate": 5.3980357281459e-06,
"loss": 0.9382,
"step": 705
},
{
"epoch": 0.69,
"learning_rate": 5.247617349750717e-06,
"loss": 0.9388,
"step": 710
},
{
"epoch": 0.7,
"learning_rate": 5.0985753479279824e-06,
"loss": 0.9692,
"step": 715
},
{
"epoch": 0.7,
"learning_rate": 4.950952887955992e-06,
"loss": 0.9653,
"step": 720
},
{
"epoch": 0.71,
"learning_rate": 4.80479272398786e-06,
"loss": 0.9613,
"step": 725
},
{
"epoch": 0.71,
"learning_rate": 4.660137186669131e-06,
"loss": 0.943,
"step": 730
},
{
"epoch": 0.72,
"learning_rate": 4.5170281708780865e-06,
"loss": 0.9451,
"step": 735
},
{
"epoch": 0.72,
"learning_rate": 4.375507123592194e-06,
"loss": 0.951,
"step": 740
},
{
"epoch": 0.73,
"learning_rate": 4.235615031884326e-06,
"loss": 0.9433,
"step": 745
},
{
"epoch": 0.73,
"learning_rate": 4.097392411052149e-06,
"loss": 0.9385,
"step": 750
},
{
"epoch": 0.74,
"learning_rate": 3.9608792928841596e-06,
"loss": 0.9488,
"step": 755
},
{
"epoch": 0.74,
"learning_rate": 3.826115214065739e-06,
"loss": 0.9413,
"step": 760
},
{
"epoch": 0.75,
"learning_rate": 3.693139204728623e-06,
"loss": 0.9535,
"step": 765
},
{
"epoch": 0.75,
"learning_rate": 3.561989777147059e-06,
"loss": 0.9384,
"step": 770
},
{
"epoch": 0.76,
"learning_rate": 3.4327049145839496e-06,
"loss": 0.9451,
"step": 775
},
{
"epoch": 0.76,
"learning_rate": 3.3053220602902057e-06,
"loss": 0.9663,
"step": 780
},
{
"epoch": 0.76,
"learning_rate": 3.1798781066605076e-06,
"loss": 0.9551,
"step": 785
},
{
"epoch": 0.77,
"learning_rate": 3.056409384548575e-06,
"loss": 0.958,
"step": 790
},
{
"epoch": 0.77,
"learning_rate": 2.934951652745123e-06,
"loss": 0.949,
"step": 795
},
{
"epoch": 0.78,
"learning_rate": 2.8155400876214365e-06,
"loss": 0.9504,
"step": 800
},
{
"epoch": 0.78,
"learning_rate": 2.698209272941659e-06,
"loss": 0.9387,
"step": 805
},
{
"epoch": 0.79,
"learning_rate": 2.5829931898467143e-06,
"loss": 0.9194,
"step": 810
},
{
"epoch": 0.79,
"learning_rate": 2.469925207012741e-06,
"loss": 0.9436,
"step": 815
},
{
"epoch": 0.8,
"learning_rate": 2.3590380709869175e-06,
"loss": 0.9401,
"step": 820
},
{
"epoch": 0.8,
"learning_rate": 2.2503638967034668e-06,
"loss": 0.9462,
"step": 825
},
{
"epoch": 0.81,
"learning_rate": 2.1439341581825855e-06,
"loss": 0.9553,
"step": 830
},
{
"epoch": 0.81,
"learning_rate": 2.039779679414996e-06,
"loss": 0.9485,
"step": 835
},
{
"epoch": 0.82,
"learning_rate": 1.9379306254347487e-06,
"loss": 0.9417,
"step": 840
},
{
"epoch": 0.82,
"learning_rate": 1.838416493582893e-06,
"loss": 0.933,
"step": 845
},
{
"epoch": 0.83,
"learning_rate": 1.7412661049645097e-06,
"loss": 0.9501,
"step": 850
},
{
"epoch": 0.83,
"learning_rate": 1.6465075961015697e-06,
"loss": 0.9522,
"step": 855
},
{
"epoch": 0.84,
"learning_rate": 1.554168410784117e-06,
"loss": 0.9434,
"step": 860
},
{
"epoch": 0.84,
"learning_rate": 1.4642752921220272e-06,
"loss": 0.938,
"step": 865
},
{
"epoch": 0.85,
"learning_rate": 1.3768542747997215e-06,
"loss": 0.943,
"step": 870
},
{
"epoch": 0.85,
"learning_rate": 1.2919306775360495e-06,
"loss": 0.9551,
"step": 875
},
{
"epoch": 0.86,
"learning_rate": 1.209529095751527e-06,
"loss": 0.9459,
"step": 880
},
{
"epoch": 0.86,
"learning_rate": 1.1296733944450445e-06,
"loss": 0.9526,
"step": 885
},
{
"epoch": 0.87,
"learning_rate": 1.0523867012821444e-06,
"loss": 0.963,
"step": 890
},
{
"epoch": 0.87,
"learning_rate": 9.776913998968196e-07,
"loss": 0.9422,
"step": 895
},
{
"epoch": 0.88,
"learning_rate": 9.056091234088038e-07,
"loss": 0.9408,
"step": 900
},
{
"epoch": 0.88,
"learning_rate": 8.361607481582312e-07,
"loss": 0.9512,
"step": 905
},
{
"epoch": 0.89,
"learning_rate": 7.693663876594648e-07,
"loss": 0.9401,
"step": 910
},
{
"epoch": 0.89,
"learning_rate": 7.052453867758525e-07,
"loss": 0.9523,
"step": 915
},
{
"epoch": 0.9,
"learning_rate": 6.438163161171096e-07,
"loss": 0.9332,
"step": 920
},
{
"epoch": 0.9,
"learning_rate": 5.850969666609363e-07,
"loss": 0.9443,
"step": 925
},
{
"epoch": 0.91,
"learning_rate": 5.291043446004074e-07,
"loss": 0.9423,
"step": 930
},
{
"epoch": 0.91,
"learning_rate": 4.7585466641868696e-07,
"loss": 0.9509,
"step": 935
},
{
"epoch": 0.92,
"learning_rate": 4.25363354192434e-07,
"loss": 0.9512,
"step": 940
},
{
"epoch": 0.92,
"learning_rate": 3.776450311252866e-07,
"loss": 0.953,
"step": 945
},
{
"epoch": 0.93,
"learning_rate": 3.3271351731271717e-07,
"loss": 0.9282,
"step": 950
},
{
"epoch": 0.93,
"learning_rate": 2.905818257394799e-07,
"loss": 0.9639,
"step": 955
},
{
"epoch": 0.94,
"learning_rate": 2.512621585108155e-07,
"loss": 0.9543,
"step": 960
},
{
"epoch": 0.94,
"learning_rate": 2.1476590331849566e-07,
"loss": 0.956,
"step": 965
},
{
"epoch": 0.95,
"learning_rate": 1.811036301427449e-07,
"loss": 0.9625,
"step": 970
},
{
"epoch": 0.95,
"learning_rate": 1.502850881909801e-07,
"loss": 0.9435,
"step": 975
},
{
"epoch": 0.95,
"learning_rate": 1.2231920307425927e-07,
"loss": 0.9538,
"step": 980
},
{
"epoch": 0.96,
"learning_rate": 9.721407422226492e-08,
"loss": 0.9398,
"step": 985
},
{
"epoch": 0.96,
"learning_rate": 7.497697253756265e-08,
"loss": 0.939,
"step": 990
},
{
"epoch": 0.97,
"learning_rate": 5.5614338289812216e-08,
"loss": 0.9302,
"step": 995
},
{
"epoch": 0.97,
"learning_rate": 3.913177925055189e-08,
"loss": 0.9574,
"step": 1000
},
{
"epoch": 0.98,
"learning_rate": 2.5534069069081957e-08,
"loss": 0.9528,
"step": 1005
},
{
"epoch": 0.98,
"learning_rate": 1.482514588993067e-08,
"loss": 0.9216,
"step": 1010
},
{
"epoch": 0.99,
"learning_rate": 7.0081112122966086e-09,
"loss": 0.9417,
"step": 1015
},
{
"epoch": 0.99,
"learning_rate": 2.0852289917971947e-09,
"loss": 0.9419,
"step": 1020
},
{
"epoch": 1.0,
"learning_rate": 5.792498478651709e-11,
"loss": 0.9435,
"step": 1025
},
{
"epoch": 1.0,
"eval_loss": 0.9532989859580994,
"eval_runtime": 1250.6177,
"eval_samples_per_second": 11.625,
"eval_steps_per_second": 0.182,
"step": 1026
},
{
"epoch": 1.0,
"step": 1026,
"total_flos": 1399316853817344.0,
"train_loss": 0.9775699134226199,
"train_runtime": 79518.8807,
"train_samples_per_second": 1.652,
"train_steps_per_second": 0.013
}
],
"logging_steps": 5,
"max_steps": 1026,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 1399316853817344.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
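
The JSON above is the standard Hugging Face Trainer state for this run: "log_history" holds one entry per logging interval ("logging_steps" is 5) with "loss", "learning_rate", and "step", followed by the final evaluation and training summary entries. A minimal sketch of how the file could be inspected, assuming Python with matplotlib installed and the file saved locally as trainer_state.json; the file path, output filename, and plotting choices are illustrative and not part of the Trainer output itself:

# Sketch: load trainer_state.json and plot training loss and learning rate vs. step.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the trailing entries carry
# "eval_loss" / "train_loss" summaries instead of a plain "loss" key.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, ax_loss = plt.subplots()
ax_loss.plot(steps, losses, label="train loss")
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("loss")

# Second y-axis for the learning-rate schedule (warmup then decay).
ax_lr = ax_loss.twinx()
ax_lr.plot(steps, lrs, color="tab:orange", label="learning rate")
ax_lr.set_ylabel("learning rate")

fig.tight_layout()
plt.savefig("loss_vs_lr.png")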