{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7433751743375174,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.9883774988377505e-05,
"loss": 1.7968,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.9767549976755e-05,
"loss": 1.745,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 4.9651324965132504e-05,
"loss": 1.6936,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 4.953509995351e-05,
"loss": 1.72,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 4.9418874941887496e-05,
"loss": 1.6769,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 4.930264993026499e-05,
"loss": 1.6031,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 4.9186424918642495e-05,
"loss": 1.606,
"step": 70
},
{
"epoch": 0.06,
"learning_rate": 4.907019990701999e-05,
"loss": 1.6362,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 4.895397489539749e-05,
"loss": 1.5938,
"step": 90
},
{
"epoch": 0.07,
"learning_rate": 4.883774988377499e-05,
"loss": 1.5678,
"step": 100
},
{
"epoch": 0.08,
"learning_rate": 4.872152487215249e-05,
"loss": 1.5751,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 4.860529986052999e-05,
"loss": 1.5708,
"step": 120
},
{
"epoch": 0.09,
"learning_rate": 4.848907484890749e-05,
"loss": 1.5404,
"step": 130
},
{
"epoch": 0.1,
"learning_rate": 4.837284983728499e-05,
"loss": 1.5682,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 4.825662482566248e-05,
"loss": 1.5564,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 4.8140399814039985e-05,
"loss": 1.6038,
"step": 160
},
{
"epoch": 0.12,
"learning_rate": 4.802417480241748e-05,
"loss": 1.6037,
"step": 170
},
{
"epoch": 0.13,
"learning_rate": 4.7907949790794984e-05,
"loss": 1.5927,
"step": 180
},
{
"epoch": 0.13,
"learning_rate": 4.779172477917248e-05,
"loss": 1.6436,
"step": 190
},
{
"epoch": 0.14,
"learning_rate": 4.767549976754998e-05,
"loss": 1.6203,
"step": 200
},
{
"epoch": 0.15,
"learning_rate": 4.755927475592748e-05,
"loss": 1.6137,
"step": 210
},
{
"epoch": 0.15,
"learning_rate": 4.744304974430498e-05,
"loss": 1.6098,
"step": 220
},
{
"epoch": 0.16,
"learning_rate": 4.732682473268247e-05,
"loss": 1.5774,
"step": 230
},
{
"epoch": 0.17,
"learning_rate": 4.7210599721059973e-05,
"loss": 1.6188,
"step": 240
},
{
"epoch": 0.17,
"learning_rate": 4.709437470943747e-05,
"loss": 1.6105,
"step": 250
},
{
"epoch": 0.18,
"learning_rate": 4.697814969781497e-05,
"loss": 1.6356,
"step": 260
},
{
"epoch": 0.19,
"learning_rate": 4.686192468619247e-05,
"loss": 1.5834,
"step": 270
},
{
"epoch": 0.2,
"learning_rate": 4.674569967456997e-05,
"loss": 1.5534,
"step": 280
},
{
"epoch": 0.2,
"learning_rate": 4.662947466294747e-05,
"loss": 1.5877,
"step": 290
},
{
"epoch": 0.21,
"learning_rate": 4.651324965132497e-05,
"loss": 1.5672,
"step": 300
},
{
"epoch": 0.22,
"learning_rate": 4.6397024639702466e-05,
"loss": 1.5725,
"step": 310
},
{
"epoch": 0.22,
"learning_rate": 4.628079962807996e-05,
"loss": 1.5749,
"step": 320
},
{
"epoch": 0.23,
"learning_rate": 4.6164574616457464e-05,
"loss": 1.5653,
"step": 330
},
{
"epoch": 0.24,
"learning_rate": 4.604834960483496e-05,
"loss": 1.5579,
"step": 340
},
{
"epoch": 0.24,
"learning_rate": 4.593212459321246e-05,
"loss": 1.6035,
"step": 350
},
{
"epoch": 0.25,
"learning_rate": 4.581589958158996e-05,
"loss": 1.5868,
"step": 360
},
{
"epoch": 0.26,
"learning_rate": 4.569967456996746e-05,
"loss": 1.5886,
"step": 370
},
{
"epoch": 0.26,
"learning_rate": 4.558344955834496e-05,
"loss": 1.5933,
"step": 380
},
{
"epoch": 0.27,
"learning_rate": 4.546722454672246e-05,
"loss": 1.5511,
"step": 390
},
{
"epoch": 0.28,
"learning_rate": 4.5350999535099956e-05,
"loss": 1.6039,
"step": 400
},
{
"epoch": 0.29,
"learning_rate": 4.523477452347745e-05,
"loss": 1.5434,
"step": 410
},
{
"epoch": 0.29,
"learning_rate": 4.5118549511854955e-05,
"loss": 1.6576,
"step": 420
},
{
"epoch": 0.3,
"learning_rate": 4.500232450023245e-05,
"loss": 1.5434,
"step": 430
},
{
"epoch": 0.31,
"learning_rate": 4.4886099488609954e-05,
"loss": 1.5774,
"step": 440
},
{
"epoch": 0.31,
"learning_rate": 4.476987447698745e-05,
"loss": 1.5882,
"step": 450
},
{
"epoch": 0.32,
"learning_rate": 4.465364946536495e-05,
"loss": 1.607,
"step": 460
},
{
"epoch": 0.33,
"learning_rate": 4.453742445374245e-05,
"loss": 1.6005,
"step": 470
},
{
"epoch": 0.33,
"learning_rate": 4.442119944211995e-05,
"loss": 1.5715,
"step": 480
},
{
"epoch": 0.34,
"learning_rate": 4.430497443049745e-05,
"loss": 1.5876,
"step": 490
},
{
"epoch": 0.35,
"learning_rate": 4.418874941887494e-05,
"loss": 1.5355,
"step": 500
},
{
"epoch": 0.36,
"learning_rate": 4.407252440725244e-05,
"loss": 1.5767,
"step": 510
},
{
"epoch": 0.36,
"learning_rate": 4.395629939562994e-05,
"loss": 1.6073,
"step": 520
},
{
"epoch": 0.37,
"learning_rate": 4.384007438400744e-05,
"loss": 1.5736,
"step": 530
},
{
"epoch": 0.38,
"learning_rate": 4.372384937238494e-05,
"loss": 1.582,
"step": 540
},
{
"epoch": 0.38,
"learning_rate": 4.3607624360762437e-05,
"loss": 1.5833,
"step": 550
},
{
"epoch": 0.39,
"learning_rate": 4.349139934913994e-05,
"loss": 1.574,
"step": 560
},
{
"epoch": 0.4,
"learning_rate": 4.3375174337517435e-05,
"loss": 1.6007,
"step": 570
},
{
"epoch": 0.4,
"learning_rate": 4.325894932589494e-05,
"loss": 1.5853,
"step": 580
},
{
"epoch": 0.41,
"learning_rate": 4.3142724314272434e-05,
"loss": 1.5949,
"step": 590
},
{
"epoch": 0.42,
"learning_rate": 4.302649930264993e-05,
"loss": 1.5795,
"step": 600
},
{
"epoch": 0.43,
"learning_rate": 4.291027429102743e-05,
"loss": 1.5985,
"step": 610
},
{
"epoch": 0.43,
"learning_rate": 4.279404927940493e-05,
"loss": 1.6253,
"step": 620
},
{
"epoch": 0.44,
"learning_rate": 4.267782426778243e-05,
"loss": 1.589,
"step": 630
},
{
"epoch": 0.45,
"learning_rate": 4.256159925615993e-05,
"loss": 1.568,
"step": 640
},
{
"epoch": 0.45,
"learning_rate": 4.244537424453743e-05,
"loss": 1.5818,
"step": 650
},
{
"epoch": 0.46,
"learning_rate": 4.2329149232914926e-05,
"loss": 1.6058,
"step": 660
},
{
"epoch": 0.47,
"learning_rate": 4.221292422129242e-05,
"loss": 1.5825,
"step": 670
},
{
"epoch": 0.47,
"learning_rate": 4.209669920966992e-05,
"loss": 1.567,
"step": 680
},
{
"epoch": 0.48,
"learning_rate": 4.198047419804742e-05,
"loss": 1.5481,
"step": 690
},
{
"epoch": 0.49,
"learning_rate": 4.186424918642492e-05,
"loss": 1.5981,
"step": 700
},
{
"epoch": 0.5,
"learning_rate": 4.174802417480242e-05,
"loss": 1.5437,
"step": 710
},
{
"epoch": 0.5,
"learning_rate": 4.1631799163179915e-05,
"loss": 1.5912,
"step": 720
},
{
"epoch": 0.51,
"learning_rate": 4.151557415155742e-05,
"loss": 1.5495,
"step": 730
},
{
"epoch": 0.52,
"learning_rate": 4.139934913993492e-05,
"loss": 1.5953,
"step": 740
},
{
"epoch": 0.52,
"learning_rate": 4.128312412831242e-05,
"loss": 1.5615,
"step": 750
},
{
"epoch": 0.53,
"learning_rate": 4.116689911668991e-05,
"loss": 1.5466,
"step": 760
},
{
"epoch": 0.54,
"learning_rate": 4.105067410506741e-05,
"loss": 1.5931,
"step": 770
},
{
"epoch": 0.54,
"learning_rate": 4.093444909344491e-05,
"loss": 1.5645,
"step": 780
},
{
"epoch": 0.55,
"learning_rate": 4.081822408182241e-05,
"loss": 1.6358,
"step": 790
},
{
"epoch": 0.56,
"learning_rate": 4.070199907019991e-05,
"loss": 1.5531,
"step": 800
},
{
"epoch": 0.56,
"learning_rate": 4.0585774058577406e-05,
"loss": 1.6038,
"step": 810
},
{
"epoch": 0.57,
"learning_rate": 4.046954904695491e-05,
"loss": 1.5844,
"step": 820
},
{
"epoch": 0.58,
"learning_rate": 4.0353324035332405e-05,
"loss": 1.5652,
"step": 830
},
{
"epoch": 0.59,
"learning_rate": 4.023709902370991e-05,
"loss": 1.545,
"step": 840
},
{
"epoch": 0.59,
"learning_rate": 4.0120874012087404e-05,
"loss": 1.5618,
"step": 850
},
{
"epoch": 0.6,
"learning_rate": 4.00046490004649e-05,
"loss": 1.5778,
"step": 860
},
{
"epoch": 0.61,
"learning_rate": 3.98884239888424e-05,
"loss": 1.6248,
"step": 870
},
{
"epoch": 0.61,
"learning_rate": 3.97721989772199e-05,
"loss": 1.6243,
"step": 880
},
{
"epoch": 0.62,
"learning_rate": 3.96559739655974e-05,
"loss": 1.5639,
"step": 890
},
{
"epoch": 0.63,
"learning_rate": 3.95397489539749e-05,
"loss": 1.6058,
"step": 900
},
{
"epoch": 0.63,
"learning_rate": 3.94235239423524e-05,
"loss": 1.5932,
"step": 910
},
{
"epoch": 0.64,
"learning_rate": 3.9307298930729896e-05,
"loss": 1.6157,
"step": 920
},
{
"epoch": 0.65,
"learning_rate": 3.91910739191074e-05,
"loss": 1.5969,
"step": 930
},
{
"epoch": 0.66,
"learning_rate": 3.907484890748489e-05,
"loss": 1.5991,
"step": 940
},
{
"epoch": 0.66,
"learning_rate": 3.895862389586239e-05,
"loss": 1.5678,
"step": 950
},
{
"epoch": 0.67,
"learning_rate": 3.8842398884239886e-05,
"loss": 1.5744,
"step": 960
},
{
"epoch": 0.68,
"learning_rate": 3.872617387261739e-05,
"loss": 1.5723,
"step": 970
},
{
"epoch": 0.68,
"learning_rate": 3.8609948860994885e-05,
"loss": 1.6042,
"step": 980
},
{
"epoch": 0.69,
"learning_rate": 3.849372384937239e-05,
"loss": 1.6093,
"step": 990
},
{
"epoch": 0.7,
"learning_rate": 3.8377498837749884e-05,
"loss": 1.5968,
"step": 1000
},
{
"epoch": 0.7,
"learning_rate": 3.8261273826127387e-05,
"loss": 1.5302,
"step": 1010
},
{
"epoch": 0.71,
"learning_rate": 3.814504881450488e-05,
"loss": 1.5719,
"step": 1020
},
{
"epoch": 0.72,
"learning_rate": 3.802882380288238e-05,
"loss": 1.5806,
"step": 1030
},
{
"epoch": 0.73,
"learning_rate": 3.791259879125988e-05,
"loss": 1.6137,
"step": 1040
},
{
"epoch": 0.73,
"learning_rate": 3.779637377963738e-05,
"loss": 1.5715,
"step": 1050
},
{
"epoch": 0.74,
"learning_rate": 3.768014876801488e-05,
"loss": 1.6134,
"step": 1060
},
{
"epoch": 0.75,
"learning_rate": 3.7563923756392376e-05,
"loss": 1.5484,
"step": 1070
},
{
"epoch": 0.75,
"learning_rate": 3.744769874476988e-05,
"loss": 1.6084,
"step": 1080
},
{
"epoch": 0.76,
"learning_rate": 3.7331473733147375e-05,
"loss": 1.5655,
"step": 1090
},
{
"epoch": 0.77,
"learning_rate": 3.721524872152488e-05,
"loss": 1.5301,
"step": 1100
},
{
"epoch": 0.77,
"learning_rate": 3.7099023709902373e-05,
"loss": 1.5907,
"step": 1110
},
{
"epoch": 0.78,
"learning_rate": 3.698279869827987e-05,
"loss": 1.5803,
"step": 1120
},
{
"epoch": 0.79,
"learning_rate": 3.6866573686657365e-05,
"loss": 1.6311,
"step": 1130
},
{
"epoch": 0.79,
"learning_rate": 3.675034867503487e-05,
"loss": 1.5678,
"step": 1140
},
{
"epoch": 0.8,
"learning_rate": 3.663412366341237e-05,
"loss": 1.5814,
"step": 1150
},
{
"epoch": 0.81,
"learning_rate": 3.651789865178987e-05,
"loss": 1.5771,
"step": 1160
},
{
"epoch": 0.82,
"learning_rate": 3.640167364016737e-05,
"loss": 1.6359,
"step": 1170
},
{
"epoch": 0.82,
"learning_rate": 3.6285448628544866e-05,
"loss": 1.5835,
"step": 1180
},
{
"epoch": 0.83,
"learning_rate": 3.616922361692237e-05,
"loss": 1.5558,
"step": 1190
},
{
"epoch": 0.84,
"learning_rate": 3.6052998605299864e-05,
"loss": 1.5884,
"step": 1200
},
{
"epoch": 0.84,
"learning_rate": 3.593677359367736e-05,
"loss": 1.5215,
"step": 1210
},
{
"epoch": 0.85,
"learning_rate": 3.5820548582054856e-05,
"loss": 1.5574,
"step": 1220
},
{
"epoch": 0.86,
"learning_rate": 3.570432357043236e-05,
"loss": 1.5946,
"step": 1230
},
{
"epoch": 0.86,
"learning_rate": 3.5588098558809855e-05,
"loss": 1.5804,
"step": 1240
},
{
"epoch": 0.87,
"learning_rate": 3.547187354718736e-05,
"loss": 1.6064,
"step": 1250
},
{
"epoch": 0.88,
"learning_rate": 3.5355648535564854e-05,
"loss": 1.6033,
"step": 1260
},
{
"epoch": 0.89,
"learning_rate": 3.5239423523942356e-05,
"loss": 1.6202,
"step": 1270
},
{
"epoch": 0.89,
"learning_rate": 3.512319851231985e-05,
"loss": 1.5858,
"step": 1280
},
{
"epoch": 0.9,
"learning_rate": 3.5006973500697355e-05,
"loss": 1.5784,
"step": 1290
},
{
"epoch": 0.91,
"learning_rate": 3.489074848907485e-05,
"loss": 1.5863,
"step": 1300
},
{
"epoch": 0.91,
"learning_rate": 3.477452347745235e-05,
"loss": 1.5941,
"step": 1310
},
{
"epoch": 0.92,
"learning_rate": 3.465829846582985e-05,
"loss": 1.6208,
"step": 1320
},
{
"epoch": 0.93,
"learning_rate": 3.4542073454207346e-05,
"loss": 1.5731,
"step": 1330
},
{
"epoch": 0.93,
"learning_rate": 3.442584844258485e-05,
"loss": 1.6135,
"step": 1340
},
{
"epoch": 0.94,
"learning_rate": 3.4309623430962344e-05,
"loss": 1.6136,
"step": 1350
},
{
"epoch": 0.95,
"learning_rate": 3.419339841933985e-05,
"loss": 1.5888,
"step": 1360
},
{
"epoch": 0.96,
"learning_rate": 3.407717340771734e-05,
"loss": 1.6092,
"step": 1370
},
{
"epoch": 0.96,
"learning_rate": 3.3960948396094846e-05,
"loss": 1.622,
"step": 1380
},
{
"epoch": 0.97,
"learning_rate": 3.3844723384472335e-05,
"loss": 1.5745,
"step": 1390
},
{
"epoch": 0.98,
"learning_rate": 3.372849837284984e-05,
"loss": 1.5963,
"step": 1400
},
{
"epoch": 0.98,
"learning_rate": 3.3612273361227334e-05,
"loss": 1.5756,
"step": 1410
},
{
"epoch": 0.99,
"learning_rate": 3.3496048349604837e-05,
"loss": 1.5973,
"step": 1420
},
{
"epoch": 1.0,
"learning_rate": 3.337982333798233e-05,
"loss": 1.5874,
"step": 1430
},
{
"epoch": 1.0,
"learning_rate": 3.3263598326359835e-05,
"loss": 1.5997,
"step": 1440
},
{
"epoch": 1.01,
"learning_rate": 3.314737331473733e-05,
"loss": 1.5552,
"step": 1450
},
{
"epoch": 1.02,
"learning_rate": 3.3031148303114834e-05,
"loss": 1.5888,
"step": 1460
},
{
"epoch": 1.03,
"learning_rate": 3.291492329149234e-05,
"loss": 1.5288,
"step": 1470
},
{
"epoch": 1.03,
"learning_rate": 3.2798698279869826e-05,
"loss": 1.5818,
"step": 1480
},
{
"epoch": 1.04,
"learning_rate": 3.268247326824733e-05,
"loss": 1.5702,
"step": 1490
},
{
"epoch": 1.05,
"learning_rate": 3.2566248256624825e-05,
"loss": 1.5365,
"step": 1500
},
{
"epoch": 1.05,
"learning_rate": 3.245002324500233e-05,
"loss": 1.5026,
"step": 1510
},
{
"epoch": 1.06,
"learning_rate": 3.233379823337982e-05,
"loss": 1.5621,
"step": 1520
},
{
"epoch": 1.07,
"learning_rate": 3.2217573221757326e-05,
"loss": 1.5647,
"step": 1530
},
{
"epoch": 1.07,
"learning_rate": 3.210134821013482e-05,
"loss": 1.555,
"step": 1540
},
{
"epoch": 1.08,
"learning_rate": 3.1985123198512325e-05,
"loss": 1.56,
"step": 1550
},
{
"epoch": 1.09,
"learning_rate": 3.186889818688982e-05,
"loss": 1.6286,
"step": 1560
},
{
"epoch": 1.09,
"learning_rate": 3.175267317526732e-05,
"loss": 1.5647,
"step": 1570
},
{
"epoch": 1.1,
"learning_rate": 3.163644816364482e-05,
"loss": 1.5405,
"step": 1580
},
{
"epoch": 1.11,
"learning_rate": 3.1520223152022315e-05,
"loss": 1.5644,
"step": 1590
},
{
"epoch": 1.12,
"learning_rate": 3.140399814039982e-05,
"loss": 1.6191,
"step": 1600
},
{
"epoch": 1.12,
"learning_rate": 3.1287773128777314e-05,
"loss": 1.5836,
"step": 1610
},
{
"epoch": 1.13,
"learning_rate": 3.117154811715482e-05,
"loss": 1.5674,
"step": 1620
},
{
"epoch": 1.14,
"learning_rate": 3.105532310553231e-05,
"loss": 1.5884,
"step": 1630
},
{
"epoch": 1.14,
"learning_rate": 3.0939098093909816e-05,
"loss": 1.5608,
"step": 1640
},
{
"epoch": 1.15,
"learning_rate": 3.082287308228731e-05,
"loss": 1.567,
"step": 1650
},
{
"epoch": 1.16,
"learning_rate": 3.070664807066481e-05,
"loss": 1.589,
"step": 1660
},
{
"epoch": 1.16,
"learning_rate": 3.0590423059042303e-05,
"loss": 1.6158,
"step": 1670
},
{
"epoch": 1.17,
"learning_rate": 3.0474198047419806e-05,
"loss": 1.5817,
"step": 1680
},
{
"epoch": 1.18,
"learning_rate": 3.0357973035797306e-05,
"loss": 1.606,
"step": 1690
},
{
"epoch": 1.19,
"learning_rate": 3.0241748024174805e-05,
"loss": 1.5705,
"step": 1700
},
{
"epoch": 1.19,
"learning_rate": 3.0125523012552304e-05,
"loss": 1.5582,
"step": 1710
},
{
"epoch": 1.2,
"learning_rate": 3.0009298000929804e-05,
"loss": 1.5409,
"step": 1720
},
{
"epoch": 1.21,
"learning_rate": 2.9893072989307303e-05,
"loss": 1.6384,
"step": 1730
},
{
"epoch": 1.21,
"learning_rate": 2.9776847977684802e-05,
"loss": 1.5539,
"step": 1740
},
{
"epoch": 1.22,
"learning_rate": 2.9660622966062295e-05,
"loss": 1.5763,
"step": 1750
},
{
"epoch": 1.23,
"learning_rate": 2.9544397954439794e-05,
"loss": 1.5333,
"step": 1760
},
{
"epoch": 1.23,
"learning_rate": 2.9428172942817294e-05,
"loss": 1.5786,
"step": 1770
},
{
"epoch": 1.24,
"learning_rate": 2.9311947931194793e-05,
"loss": 1.5807,
"step": 1780
},
{
"epoch": 1.25,
"learning_rate": 2.9195722919572292e-05,
"loss": 1.6011,
"step": 1790
},
{
"epoch": 1.26,
"learning_rate": 2.9079497907949792e-05,
"loss": 1.5959,
"step": 1800
},
{
"epoch": 1.26,
"learning_rate": 2.896327289632729e-05,
"loss": 1.5689,
"step": 1810
},
{
"epoch": 1.27,
"learning_rate": 2.884704788470479e-05,
"loss": 1.5855,
"step": 1820
},
{
"epoch": 1.28,
"learning_rate": 2.8730822873082286e-05,
"loss": 1.5625,
"step": 1830
},
{
"epoch": 1.28,
"learning_rate": 2.8614597861459786e-05,
"loss": 1.5804,
"step": 1840
},
{
"epoch": 1.29,
"learning_rate": 2.8498372849837285e-05,
"loss": 1.5756,
"step": 1850
},
{
"epoch": 1.3,
"learning_rate": 2.8382147838214784e-05,
"loss": 1.5486,
"step": 1860
},
{
"epoch": 1.3,
"learning_rate": 2.8265922826592284e-05,
"loss": 1.5671,
"step": 1870
},
{
"epoch": 1.31,
"learning_rate": 2.8149697814969783e-05,
"loss": 1.6146,
"step": 1880
},
{
"epoch": 1.32,
"learning_rate": 2.8033472803347283e-05,
"loss": 1.5203,
"step": 1890
},
{
"epoch": 1.32,
"learning_rate": 2.7917247791724782e-05,
"loss": 1.5485,
"step": 1900
},
{
"epoch": 1.33,
"learning_rate": 2.780102278010228e-05,
"loss": 1.6385,
"step": 1910
},
{
"epoch": 1.34,
"learning_rate": 2.7684797768479774e-05,
"loss": 1.6054,
"step": 1920
},
{
"epoch": 1.35,
"learning_rate": 2.7568572756857277e-05,
"loss": 1.5451,
"step": 1930
},
{
"epoch": 1.35,
"learning_rate": 2.7452347745234776e-05,
"loss": 1.5785,
"step": 1940
},
{
"epoch": 1.36,
"learning_rate": 2.7336122733612275e-05,
"loss": 1.6233,
"step": 1950
},
{
"epoch": 1.37,
"learning_rate": 2.7219897721989775e-05,
"loss": 1.5784,
"step": 1960
},
{
"epoch": 1.37,
"learning_rate": 2.7103672710367274e-05,
"loss": 1.5794,
"step": 1970
},
{
"epoch": 1.38,
"learning_rate": 2.6987447698744773e-05,
"loss": 1.5412,
"step": 1980
},
{
"epoch": 1.39,
"learning_rate": 2.6871222687122273e-05,
"loss": 1.5684,
"step": 1990
},
{
"epoch": 1.39,
"learning_rate": 2.6754997675499772e-05,
"loss": 1.5771,
"step": 2000
},
{
"epoch": 1.4,
"learning_rate": 2.6638772663877265e-05,
"loss": 1.5361,
"step": 2010
},
{
"epoch": 1.41,
"learning_rate": 2.6522547652254764e-05,
"loss": 1.6065,
"step": 2020
},
{
"epoch": 1.42,
"learning_rate": 2.6406322640632263e-05,
"loss": 1.5777,
"step": 2030
},
{
"epoch": 1.42,
"learning_rate": 2.6290097629009763e-05,
"loss": 1.5802,
"step": 2040
},
{
"epoch": 1.43,
"learning_rate": 2.6173872617387262e-05,
"loss": 1.5399,
"step": 2050
},
{
"epoch": 1.44,
"learning_rate": 2.605764760576476e-05,
"loss": 1.5979,
"step": 2060
},
{
"epoch": 1.44,
"learning_rate": 2.594142259414226e-05,
"loss": 1.5783,
"step": 2070
},
{
"epoch": 1.45,
"learning_rate": 2.582519758251976e-05,
"loss": 1.5719,
"step": 2080
},
{
"epoch": 1.46,
"learning_rate": 2.570897257089726e-05,
"loss": 1.5893,
"step": 2090
},
{
"epoch": 1.46,
"learning_rate": 2.5592747559274755e-05,
"loss": 1.5838,
"step": 2100
},
{
"epoch": 1.47,
"learning_rate": 2.5476522547652255e-05,
"loss": 1.6062,
"step": 2110
},
{
"epoch": 1.48,
"learning_rate": 2.5360297536029754e-05,
"loss": 1.5292,
"step": 2120
},
{
"epoch": 1.49,
"learning_rate": 2.5244072524407254e-05,
"loss": 1.5686,
"step": 2130
},
{
"epoch": 1.49,
"learning_rate": 2.5127847512784753e-05,
"loss": 1.5955,
"step": 2140
},
{
"epoch": 1.5,
"learning_rate": 2.5011622501162252e-05,
"loss": 1.608,
"step": 2150
},
{
"epoch": 1.51,
"learning_rate": 2.489539748953975e-05,
"loss": 1.551,
"step": 2160
},
{
"epoch": 1.51,
"learning_rate": 2.4779172477917248e-05,
"loss": 1.5327,
"step": 2170
},
{
"epoch": 1.52,
"learning_rate": 2.4662947466294747e-05,
"loss": 1.5664,
"step": 2180
},
{
"epoch": 1.53,
"learning_rate": 2.4546722454672246e-05,
"loss": 1.5965,
"step": 2190
},
{
"epoch": 1.53,
"learning_rate": 2.4430497443049746e-05,
"loss": 1.5757,
"step": 2200
},
{
"epoch": 1.54,
"learning_rate": 2.431427243142724e-05,
"loss": 1.5749,
"step": 2210
},
{
"epoch": 1.55,
"learning_rate": 2.419804741980474e-05,
"loss": 1.5758,
"step": 2220
},
{
"epoch": 1.56,
"learning_rate": 2.408182240818224e-05,
"loss": 1.5965,
"step": 2230
},
{
"epoch": 1.56,
"learning_rate": 2.396559739655974e-05,
"loss": 1.5727,
"step": 2240
},
{
"epoch": 1.57,
"learning_rate": 2.3849372384937242e-05,
"loss": 1.5425,
"step": 2250
},
{
"epoch": 1.58,
"learning_rate": 2.373314737331474e-05,
"loss": 1.589,
"step": 2260
},
{
"epoch": 1.58,
"learning_rate": 2.3616922361692238e-05,
"loss": 1.5723,
"step": 2270
},
{
"epoch": 1.59,
"learning_rate": 2.3500697350069737e-05,
"loss": 1.5764,
"step": 2280
},
{
"epoch": 1.6,
"learning_rate": 2.3384472338447236e-05,
"loss": 1.5817,
"step": 2290
},
{
"epoch": 1.6,
"learning_rate": 2.3268247326824732e-05,
"loss": 1.5438,
"step": 2300
},
{
"epoch": 1.61,
"learning_rate": 2.3152022315202232e-05,
"loss": 1.5678,
"step": 2310
},
{
"epoch": 1.62,
"learning_rate": 2.303579730357973e-05,
"loss": 1.6154,
"step": 2320
},
{
"epoch": 1.62,
"learning_rate": 2.291957229195723e-05,
"loss": 1.5987,
"step": 2330
},
{
"epoch": 1.63,
"learning_rate": 2.280334728033473e-05,
"loss": 1.5824,
"step": 2340
},
{
"epoch": 1.64,
"learning_rate": 2.2687122268712226e-05,
"loss": 1.546,
"step": 2350
},
{
"epoch": 1.65,
"learning_rate": 2.2570897257089725e-05,
"loss": 1.5595,
"step": 2360
},
{
"epoch": 1.65,
"learning_rate": 2.2454672245467225e-05,
"loss": 1.5642,
"step": 2370
},
{
"epoch": 1.66,
"learning_rate": 2.2338447233844724e-05,
"loss": 1.5573,
"step": 2380
},
{
"epoch": 1.67,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.5743,
"step": 2390
},
{
"epoch": 1.67,
"learning_rate": 2.2105997210599723e-05,
"loss": 1.5902,
"step": 2400
},
{
"epoch": 1.68,
"learning_rate": 2.1989772198977222e-05,
"loss": 1.5863,
"step": 2410
},
{
"epoch": 1.69,
"learning_rate": 2.187354718735472e-05,
"loss": 1.5561,
"step": 2420
},
{
"epoch": 1.69,
"learning_rate": 2.175732217573222e-05,
"loss": 1.5889,
"step": 2430
},
{
"epoch": 1.7,
"learning_rate": 2.1641097164109717e-05,
"loss": 1.5663,
"step": 2440
},
{
"epoch": 1.71,
"learning_rate": 2.1524872152487216e-05,
"loss": 1.5802,
"step": 2450
},
{
"epoch": 1.72,
"learning_rate": 2.1408647140864715e-05,
"loss": 1.5521,
"step": 2460
},
{
"epoch": 1.72,
"learning_rate": 2.1292422129242215e-05,
"loss": 1.5908,
"step": 2470
},
{
"epoch": 1.73,
"learning_rate": 2.117619711761971e-05,
"loss": 1.5885,
"step": 2480
},
{
"epoch": 1.74,
"learning_rate": 2.105997210599721e-05,
"loss": 1.5364,
"step": 2490
},
{
"epoch": 1.74,
"learning_rate": 2.094374709437471e-05,
"loss": 1.5385,
"step": 2500
}
],
"logging_steps": 10,
"max_steps": 4302,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.579588036193157e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}