SLT-Task1-Llama2-7b-HyPo-baseline / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9968387776606953,
"eval_steps": 100,
"global_step": 1422,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 1.8102548576192377,
"learning_rate": 2.5e-05,
"loss": 0.681,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 1.6409873968033384,
"learning_rate": 5e-05,
"loss": 0.3735,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 1.4241968146548056,
"learning_rate": 4.9993723817567996e-05,
"loss": 0.3481,
"step": 30
},
{
"epoch": 0.08,
"grad_norm": 1.5192149828819788,
"learning_rate": 4.997489842150924e-05,
"loss": 0.3733,
"step": 40
},
{
"epoch": 0.11,
"grad_norm": 1.3829364414625014,
"learning_rate": 4.994353326395334e-05,
"loss": 0.3561,
"step": 50
},
{
"epoch": 0.13,
"grad_norm": 1.6320786389820312,
"learning_rate": 4.989964409317637e-05,
"loss": 0.355,
"step": 60
},
{
"epoch": 0.15,
"grad_norm": 1.4596195084848897,
"learning_rate": 4.984325294569372e-05,
"loss": 0.3444,
"step": 70
},
{
"epoch": 0.17,
"grad_norm": 1.4431914548343205,
"learning_rate": 4.977438813519574e-05,
"loss": 0.3373,
"step": 80
},
{
"epoch": 0.19,
"grad_norm": 2.498892153458844,
"learning_rate": 4.969308423833152e-05,
"loss": 0.3602,
"step": 90
},
{
"epoch": 0.21,
"grad_norm": 1.6601234439104857,
"learning_rate": 4.9599382077348205e-05,
"loss": 0.3436,
"step": 100
},
{
"epoch": 0.21,
"eval_loss": 0.34954115748405457,
"eval_runtime": 46.7928,
"eval_samples_per_second": 288.314,
"eval_steps_per_second": 9.018,
"step": 100
},
{
"epoch": 0.23,
"grad_norm": 1.0930800520190758,
"learning_rate": 4.949332869959432e-05,
"loss": 0.352,
"step": 110
},
{
"epoch": 0.25,
"grad_norm": 1.5471349397538547,
"learning_rate": 4.9374977353897566e-05,
"loss": 0.3426,
"step": 120
},
{
"epoch": 0.27,
"grad_norm": 1.2518152452620943,
"learning_rate": 4.9244387463828876e-05,
"loss": 0.3422,
"step": 130
},
{
"epoch": 0.3,
"grad_norm": 2.0687037118070535,
"learning_rate": 4.910162459786617e-05,
"loss": 0.3378,
"step": 140
},
{
"epoch": 0.32,
"grad_norm": 1.2965517147404728,
"learning_rate": 4.894676043647274e-05,
"loss": 0.3381,
"step": 150
},
{
"epoch": 0.34,
"grad_norm": 1.077453296704333,
"learning_rate": 4.8779872736106916e-05,
"loss": 0.3214,
"step": 160
},
{
"epoch": 0.36,
"grad_norm": 1.1426693406130142,
"learning_rate": 4.8601045290180946e-05,
"loss": 0.3372,
"step": 170
},
{
"epoch": 0.38,
"grad_norm": 1.0178221568525938,
"learning_rate": 4.84103678869888e-05,
"loss": 0.3378,
"step": 180
},
{
"epoch": 0.4,
"grad_norm": 1.228896746635447,
"learning_rate": 4.820793626462391e-05,
"loss": 0.3218,
"step": 190
},
{
"epoch": 0.42,
"grad_norm": 1.7376473974923927,
"learning_rate": 4.799385206290965e-05,
"loss": 0.3306,
"step": 200
},
{
"epoch": 0.42,
"eval_loss": 0.33515387773513794,
"eval_runtime": 47.1074,
"eval_samples_per_second": 286.388,
"eval_steps_per_second": 8.958,
"step": 200
},
{
"epoch": 0.44,
"grad_norm": 1.1779458621544412,
"learning_rate": 4.7768222772366466e-05,
"loss": 0.3288,
"step": 210
},
{
"epoch": 0.46,
"grad_norm": 1.0719044164695737,
"learning_rate": 4.753116168024153e-05,
"loss": 0.3052,
"step": 220
},
{
"epoch": 0.48,
"grad_norm": 1.3092426623376585,
"learning_rate": 4.728278781362777e-05,
"loss": 0.3329,
"step": 230
},
{
"epoch": 0.51,
"grad_norm": 1.3613973232931833,
"learning_rate": 4.702322587970104e-05,
"loss": 0.3305,
"step": 240
},
{
"epoch": 0.53,
"grad_norm": 1.379957402090315,
"learning_rate": 4.6752606203105314e-05,
"loss": 0.3293,
"step": 250
},
{
"epoch": 0.55,
"grad_norm": 1.2064354059145495,
"learning_rate": 4.647106466051741e-05,
"loss": 0.3276,
"step": 260
},
{
"epoch": 0.57,
"grad_norm": 0.9407218615903679,
"learning_rate": 4.617874261242399e-05,
"loss": 0.3148,
"step": 270
},
{
"epoch": 0.59,
"grad_norm": 1.102895278969822,
"learning_rate": 4.5875786832145287e-05,
"loss": 0.3131,
"step": 280
},
{
"epoch": 0.61,
"grad_norm": 1.1283794552904691,
"learning_rate": 4.556234943214095e-05,
"loss": 0.3222,
"step": 290
},
{
"epoch": 0.63,
"grad_norm": 1.3966644012679954,
"learning_rate": 4.523858778763528e-05,
"loss": 0.3417,
"step": 300
},
{
"epoch": 0.63,
"eval_loss": 0.32445600628852844,
"eval_runtime": 47.1805,
"eval_samples_per_second": 285.945,
"eval_steps_per_second": 8.944,
"step": 300
},
{
"epoch": 0.65,
"grad_norm": 1.0630493028269177,
"learning_rate": 4.490466445759988e-05,
"loss": 0.3152,
"step": 310
},
{
"epoch": 0.67,
"grad_norm": 1.0652057869610336,
"learning_rate": 4.456074710313378e-05,
"loss": 0.3038,
"step": 320
},
{
"epoch": 0.7,
"grad_norm": 1.2341586090990317,
"learning_rate": 4.420700840328162e-05,
"loss": 0.3082,
"step": 330
},
{
"epoch": 0.72,
"grad_norm": 0.9252822939526227,
"learning_rate": 4.38436259683325e-05,
"loss": 0.3174,
"step": 340
},
{
"epoch": 0.74,
"grad_norm": 1.1914450398759333,
"learning_rate": 4.347078225064276e-05,
"loss": 0.3238,
"step": 350
},
{
"epoch": 0.76,
"grad_norm": 1.0380458098458347,
"learning_rate": 4.308866445302766e-05,
"loss": 0.3036,
"step": 360
},
{
"epoch": 0.78,
"grad_norm": 1.1100397252938874,
"learning_rate": 4.269746443476787e-05,
"loss": 0.3053,
"step": 370
},
{
"epoch": 0.8,
"grad_norm": 1.0584951830825808,
"learning_rate": 4.2297378615277935e-05,
"loss": 0.3082,
"step": 380
},
{
"epoch": 0.82,
"grad_norm": 1.1192502727228961,
"learning_rate": 4.188860787548522e-05,
"loss": 0.3126,
"step": 390
},
{
"epoch": 0.84,
"grad_norm": 1.0566732730775144,
"learning_rate": 4.1471357456968556e-05,
"loss": 0.3154,
"step": 400
},
{
"epoch": 0.84,
"eval_loss": 0.31601378321647644,
"eval_runtime": 47.231,
"eval_samples_per_second": 285.639,
"eval_steps_per_second": 8.935,
"step": 400
},
{
"epoch": 0.86,
"grad_norm": 1.1380881980287134,
"learning_rate": 4.1045836858907676e-05,
"loss": 0.3144,
"step": 410
},
{
"epoch": 0.89,
"grad_norm": 0.9239785068723881,
"learning_rate": 4.061225973289473e-05,
"loss": 0.3054,
"step": 420
},
{
"epoch": 0.91,
"grad_norm": 1.1226362379499772,
"learning_rate": 4.0170843775661025e-05,
"loss": 0.3132,
"step": 430
},
{
"epoch": 0.93,
"grad_norm": 1.055418877421314,
"learning_rate": 3.9721810619772636e-05,
"loss": 0.3096,
"step": 440
},
{
"epoch": 0.95,
"grad_norm": 1.260485940528199,
"learning_rate": 3.926538572234991e-05,
"loss": 0.3112,
"step": 450
},
{
"epoch": 0.97,
"grad_norm": 0.8290208161083492,
"learning_rate": 3.880179825186667e-05,
"loss": 0.32,
"step": 460
},
{
"epoch": 0.99,
"grad_norm": 0.9894500964017104,
"learning_rate": 3.833128097308594e-05,
"loss": 0.2963,
"step": 470
},
{
"epoch": 1.01,
"grad_norm": 0.8202492183589537,
"learning_rate": 3.785407013019006e-05,
"loss": 0.2216,
"step": 480
},
{
"epoch": 1.03,
"grad_norm": 0.8958874625557091,
"learning_rate": 3.73704053281637e-05,
"loss": 0.1707,
"step": 490
},
{
"epoch": 1.05,
"grad_norm": 0.7913123522777149,
"learning_rate": 3.688052941248956e-05,
"loss": 0.1577,
"step": 500
},
{
"epoch": 1.05,
"eval_loss": 0.3239268660545349,
"eval_runtime": 47.1856,
"eval_samples_per_second": 285.913,
"eval_steps_per_second": 8.943,
"step": 500
},
{
"epoch": 1.07,
"grad_norm": 0.6951457322398226,
"learning_rate": 3.6384688347216875e-05,
"loss": 0.1549,
"step": 510
},
{
"epoch": 1.1,
"grad_norm": 3.3614607662338507,
"learning_rate": 3.58831310914643e-05,
"loss": 0.1597,
"step": 520
},
{
"epoch": 1.12,
"grad_norm": 0.8448116369901294,
"learning_rate": 3.53761094744188e-05,
"loss": 0.1492,
"step": 530
},
{
"epoch": 1.14,
"grad_norm": 0.9188827386043135,
"learning_rate": 3.4863878068893625e-05,
"loss": 0.1576,
"step": 540
},
{
"epoch": 1.16,
"grad_norm": 0.8781597082892243,
"learning_rate": 3.434669406350866e-05,
"loss": 0.152,
"step": 550
},
{
"epoch": 1.18,
"grad_norm": 0.6881353088868859,
"learning_rate": 3.382481713355738e-05,
"loss": 0.1581,
"step": 560
},
{
"epoch": 1.2,
"grad_norm": 1.2684676365868262,
"learning_rate": 3.3298509310625363e-05,
"loss": 0.1574,
"step": 570
},
{
"epoch": 1.22,
"grad_norm": 0.6337141659864904,
"learning_rate": 3.276803485102557e-05,
"loss": 0.1578,
"step": 580
},
{
"epoch": 1.24,
"grad_norm": 0.704714314928959,
"learning_rate": 3.223366010311671e-05,
"loss": 0.1514,
"step": 590
},
{
"epoch": 1.26,
"grad_norm": 0.6460825296146832,
"learning_rate": 3.1695653373571196e-05,
"loss": 0.159,
"step": 600
},
{
"epoch": 1.26,
"eval_loss": 0.32300886511802673,
"eval_runtime": 47.067,
"eval_samples_per_second": 286.634,
"eval_steps_per_second": 8.966,
"step": 600
},
{
"epoch": 1.29,
"grad_norm": 0.8717091665319785,
"learning_rate": 3.115428479265975e-05,
"loss": 0.1527,
"step": 610
},
{
"epoch": 1.31,
"grad_norm": 0.8221409439379637,
"learning_rate": 3.060982617862053e-05,
"loss": 0.1542,
"step": 620
},
{
"epoch": 1.33,
"grad_norm": 0.7676631827886251,
"learning_rate": 3.006255090118059e-05,
"loss": 0.1574,
"step": 630
},
{
"epoch": 1.35,
"grad_norm": 0.8176603071255903,
"learning_rate": 2.9512733744298482e-05,
"loss": 0.1599,
"step": 640
},
{
"epoch": 1.37,
"grad_norm": 0.8478701648849646,
"learning_rate": 2.8960650768196672e-05,
"loss": 0.1527,
"step": 650
},
{
"epoch": 1.39,
"grad_norm": 0.8105241966593794,
"learning_rate": 2.8406579170753205e-05,
"loss": 0.1555,
"step": 660
},
{
"epoch": 1.41,
"grad_norm": 0.7341788063465178,
"learning_rate": 2.785079714832216e-05,
"loss": 0.1503,
"step": 670
},
{
"epoch": 1.43,
"grad_norm": 1.1238252352545457,
"learning_rate": 2.7293583756052755e-05,
"loss": 0.1615,
"step": 680
},
{
"epoch": 1.45,
"grad_norm": 0.7793278106904632,
"learning_rate": 2.673521876777727e-05,
"loss": 0.1527,
"step": 690
},
{
"epoch": 1.48,
"grad_norm": 0.7257177280067018,
"learning_rate": 2.6175982535538098e-05,
"loss": 0.1493,
"step": 700
},
{
"epoch": 1.48,
"eval_loss": 0.3175669312477112,
"eval_runtime": 47.0992,
"eval_samples_per_second": 286.438,
"eval_steps_per_second": 8.96,
"step": 700
},
{
"epoch": 1.5,
"grad_norm": 0.801964046142728,
"learning_rate": 2.561615584882453e-05,
"loss": 0.1607,
"step": 710
},
{
"epoch": 1.52,
"grad_norm": 0.7155830027665171,
"learning_rate": 2.5056019793589858e-05,
"loss": 0.1492,
"step": 720
},
{
"epoch": 1.54,
"grad_norm": 0.7264384440518477,
"learning_rate": 2.449585561111965e-05,
"loss": 0.1324,
"step": 730
},
{
"epoch": 1.56,
"grad_norm": 0.8168302855335549,
"learning_rate": 2.3935944556821966e-05,
"loss": 0.1555,
"step": 740
},
{
"epoch": 1.58,
"grad_norm": 0.7446449421109613,
"learning_rate": 2.3376567759010614e-05,
"loss": 0.1528,
"step": 750
},
{
"epoch": 1.6,
"grad_norm": 0.6027586148022397,
"learning_rate": 2.281800607775211e-05,
"loss": 0.152,
"step": 760
},
{
"epoch": 1.62,
"grad_norm": 0.8554567150163206,
"learning_rate": 2.2260539963847317e-05,
"loss": 0.1534,
"step": 770
},
{
"epoch": 1.64,
"grad_norm": 0.7383522990234253,
"learning_rate": 2.1704449318018692e-05,
"loss": 0.1592,
"step": 780
},
{
"epoch": 1.66,
"grad_norm": 0.7958490328944942,
"learning_rate": 2.1150013350373594e-05,
"loss": 0.1426,
"step": 790
},
{
"epoch": 1.69,
"grad_norm": 0.9825338160992358,
"learning_rate": 2.059751044021441e-05,
"loss": 0.1498,
"step": 800
},
{
"epoch": 1.69,
"eval_loss": 0.31580981612205505,
"eval_runtime": 49.0093,
"eval_samples_per_second": 275.274,
"eval_steps_per_second": 8.611,
"step": 800
},
{
"epoch": 1.71,
"grad_norm": 0.7301707840582428,
"learning_rate": 2.00472179962658e-05,
"loss": 0.1437,
"step": 810
},
{
"epoch": 1.73,
"grad_norm": 0.9685123461176025,
"learning_rate": 1.9499412317389305e-05,
"loss": 0.156,
"step": 820
},
{
"epoch": 1.75,
"grad_norm": 2.10442864010648,
"learning_rate": 1.895436845385516e-05,
"loss": 0.1496,
"step": 830
},
{
"epoch": 1.77,
"grad_norm": 0.9146649049632373,
"learning_rate": 1.8412360069241034e-05,
"loss": 0.1454,
"step": 840
},
{
"epoch": 1.79,
"grad_norm": 1.4265670270210795,
"learning_rate": 1.7873659303027052e-05,
"loss": 0.1492,
"step": 850
},
{
"epoch": 1.81,
"grad_norm": 0.7559717203281935,
"learning_rate": 1.733853663395602e-05,
"loss": 0.1535,
"step": 860
},
{
"epoch": 1.83,
"grad_norm": 1.3311913559020063,
"learning_rate": 1.6807260744227513e-05,
"loss": 0.1479,
"step": 870
},
{
"epoch": 1.85,
"grad_norm": 0.7863355556565459,
"learning_rate": 1.6280098384593966e-05,
"loss": 0.1452,
"step": 880
},
{
"epoch": 1.88,
"grad_norm": 0.6405600189189222,
"learning_rate": 1.5757314240426613e-05,
"loss": 0.1417,
"step": 890
},
{
"epoch": 1.9,
"grad_norm": 0.9800841015815575,
"learning_rate": 1.5239170798818381e-05,
"loss": 0.147,
"step": 900
},
{
"epoch": 1.9,
"eval_loss": 0.3103533089160919,
"eval_runtime": 47.2326,
"eval_samples_per_second": 285.629,
"eval_steps_per_second": 8.935,
"step": 900
},
{
"epoch": 1.92,
"grad_norm": 0.8072145902479386,
"learning_rate": 1.472592821679048e-05,
"loss": 0.1599,
"step": 910
},
{
"epoch": 1.94,
"grad_norm": 0.7396163055843903,
"learning_rate": 1.4217844190669058e-05,
"loss": 0.1413,
"step": 920
},
{
"epoch": 1.96,
"grad_norm": 0.7609487598644061,
"learning_rate": 1.3715173826697209e-05,
"loss": 0.1575,
"step": 930
},
{
"epoch": 1.98,
"grad_norm": 0.899248106175493,
"learning_rate": 1.3218169512947542e-05,
"loss": 0.1368,
"step": 940
},
{
"epoch": 2.0,
"grad_norm": 0.5492609472889338,
"learning_rate": 1.2727080792599455e-05,
"loss": 0.1356,
"step": 950
},
{
"epoch": 2.02,
"grad_norm": 0.5155984048617979,
"learning_rate": 1.2242154238644879e-05,
"loss": 0.0618,
"step": 960
},
{
"epoch": 2.04,
"grad_norm": 0.6456926556116795,
"learning_rate": 1.1763633330085325e-05,
"loss": 0.0638,
"step": 970
},
{
"epoch": 2.07,
"grad_norm": 0.6205542344499085,
"learning_rate": 1.1291758329682358e-05,
"loss": 0.0611,
"step": 980
},
{
"epoch": 2.09,
"grad_norm": 0.4847483179925426,
"learning_rate": 1.0826766163322982e-05,
"loss": 0.0575,
"step": 990
},
{
"epoch": 2.11,
"grad_norm": 0.5413371468713811,
"learning_rate": 1.0368890301060457e-05,
"loss": 0.0583,
"step": 1000
},
{
"epoch": 2.11,
"eval_loss": 0.34516656398773193,
"eval_runtime": 47.23,
"eval_samples_per_second": 285.645,
"eval_steps_per_second": 8.935,
"step": 1000
},
{
"epoch": 2.13,
"grad_norm": 0.5404171416731017,
"learning_rate": 9.918360639890187e-06,
"loss": 0.0601,
"step": 1010
},
{
"epoch": 2.15,
"grad_norm": 0.5620072184000532,
"learning_rate": 9.475403388319752e-06,
"loss": 0.0562,
"step": 1020
},
{
"epoch": 2.17,
"grad_norm": 0.5612527587767797,
"learning_rate": 9.040240952790765e-06,
"loss": 0.06,
"step": 1030
},
{
"epoch": 2.19,
"grad_norm": 0.556109347253415,
"learning_rate": 8.613091826009884e-06,
"loss": 0.0586,
"step": 1040
},
{
"epoch": 2.21,
"grad_norm": 0.526323377202855,
"learning_rate": 8.194170477244729e-06,
"loss": 0.0555,
"step": 1050
},
{
"epoch": 2.23,
"grad_norm": 0.4839147527811038,
"learning_rate": 7.783687244640048e-06,
"loss": 0.0586,
"step": 1060
},
{
"epoch": 2.26,
"grad_norm": 0.5384902556315007,
"learning_rate": 7.38184822960811e-06,
"loss": 0.0563,
"step": 1070
},
{
"epoch": 2.28,
"grad_norm": 0.5487199740187843,
"learning_rate": 6.988855193346236e-06,
"loss": 0.0591,
"step": 1080
},
{
"epoch": 2.3,
"grad_norm": 0.558830265761421,
"learning_rate": 6.604905455533625e-06,
"loss": 0.0581,
"step": 1090
},
{
"epoch": 2.32,
"grad_norm": 0.5905937756559718,
"learning_rate": 6.230191795258228e-06,
"loss": 0.0547,
"step": 1100
},
{
"epoch": 2.32,
"eval_loss": 0.3394644260406494,
"eval_runtime": 47.3785,
"eval_samples_per_second": 284.75,
"eval_steps_per_second": 8.907,
"step": 1100
},
{
"epoch": 2.34,
"grad_norm": 0.5552812370662581,
"learning_rate": 5.864902354223384e-06,
"loss": 0.0647,
"step": 1110
},
{
"epoch": 2.36,
"grad_norm": 0.5583176961830797,
"learning_rate": 5.509220542282864e-06,
"loss": 0.0579,
"step": 1120
},
{
"epoch": 2.38,
"grad_norm": 0.527577115010199,
"learning_rate": 5.163324945351841e-06,
"loss": 0.0547,
"step": 1130
},
{
"epoch": 2.4,
"grad_norm": 0.5723382741785656,
"learning_rate": 4.82738923573983e-06,
"loss": 0.0522,
"step": 1140
},
{
"epoch": 2.42,
"grad_norm": 0.6612580191515155,
"learning_rate": 4.501582084950715e-06,
"loss": 0.0546,
"step": 1150
},
{
"epoch": 2.44,
"grad_norm": 0.5398335429013659,
"learning_rate": 4.186067078993794e-06,
"loss": 0.0549,
"step": 1160
},
{
"epoch": 2.47,
"grad_norm": 0.5344699336645098,
"learning_rate": 3.8810026362480684e-06,
"loss": 0.0587,
"step": 1170
},
{
"epoch": 2.49,
"grad_norm": 0.5444597939849882,
"learning_rate": 3.586541927921222e-06,
"loss": 0.0581,
"step": 1180
},
{
"epoch": 2.51,
"grad_norm": 0.5463988615566789,
"learning_rate": 3.3028328011432157e-06,
"loss": 0.0575,
"step": 1190
},
{
"epoch": 2.53,
"grad_norm": 1.0764210232146447,
"learning_rate": 3.030017704733043e-06,
"loss": 0.0589,
"step": 1200
},
{
"epoch": 2.53,
"eval_loss": 0.34168508648872375,
"eval_runtime": 47.2752,
"eval_samples_per_second": 285.372,
"eval_steps_per_second": 8.926,
"step": 1200
},
{
"epoch": 2.55,
"grad_norm": 0.4953517170839006,
"learning_rate": 2.7682336176759195e-06,
"loss": 0.0581,
"step": 1210
},
{
"epoch": 2.57,
"grad_norm": 0.5541503548086418,
"learning_rate": 2.51761198034689e-06,
"loss": 0.0546,
"step": 1220
},
{
"epoch": 2.59,
"grad_norm": 0.49468890807169186,
"learning_rate": 2.278278628515354e-06,
"loss": 0.0546,
"step": 1230
},
{
"epoch": 2.61,
"grad_norm": 0.5409541969627151,
"learning_rate": 2.050353730163554e-06,
"loss": 0.0587,
"step": 1240
},
{
"epoch": 2.63,
"grad_norm": 0.5627631577280182,
"learning_rate": 1.8339517251509146e-06,
"loss": 0.0591,
"step": 1250
},
{
"epoch": 2.66,
"grad_norm": 0.6303369184474236,
"learning_rate": 1.6291812677544121e-06,
"loss": 0.0565,
"step": 1260
},
{
"epoch": 2.68,
"grad_norm": 0.5950168840485455,
"learning_rate": 1.436145172113834e-06,
"loss": 0.0621,
"step": 1270
},
{
"epoch": 2.7,
"grad_norm": 0.6169520124840763,
"learning_rate": 1.2549403606093525e-06,
"loss": 0.0585,
"step": 1280
},
{
"epoch": 2.72,
"grad_norm": 0.6215760879408516,
"learning_rate": 1.0856578151973246e-06,
"loss": 0.0551,
"step": 1290
},
{
"epoch": 2.74,
"grad_norm": 0.5481992179891889,
"learning_rate": 9.283825317287692e-07,
"loss": 0.0593,
"step": 1300
},
{
"epoch": 2.74,
"eval_loss": 0.34135544300079346,
"eval_runtime": 47.2086,
"eval_samples_per_second": 285.774,
"eval_steps_per_second": 8.939,
"step": 1300
},
{
"epoch": 2.76,
"grad_norm": 0.5600873628791802,
"learning_rate": 7.83193477273364e-07,
"loss": 0.063,
"step": 1310
},
{
"epoch": 2.78,
"grad_norm": 0.47881555590847813,
"learning_rate": 6.501635504705422e-07,
"loss": 0.0556,
"step": 1320
},
{
"epoch": 2.8,
"grad_norm": 0.5702498469737556,
"learning_rate": 5.293595449274685e-07,
"loss": 0.0578,
"step": 1330
},
{
"epoch": 2.82,
"grad_norm": 0.6344603201355465,
"learning_rate": 4.208421156823239e-07,
"loss": 0.0621,
"step": 1340
},
{
"epoch": 2.85,
"grad_norm": 0.4557995262356338,
"learning_rate": 3.2466574874975565e-07,
"loss": 0.0615,
"step": 1350
},
{
"epoch": 2.87,
"grad_norm": 0.60731847293474,
"learning_rate": 2.408787337637164e-07,
"loss": 0.0563,
"step": 1360
},
{
"epoch": 2.89,
"grad_norm": 0.5748533793418215,
"learning_rate": 1.6952313973152834e-07,
"loss": 0.0557,
"step": 1370
},
{
"epoch": 2.91,
"grad_norm": 0.5805376198014929,
"learning_rate": 1.1063479391124898e-07,
"loss": 0.0519,
"step": 1380
},
{
"epoch": 2.93,
"grad_norm": 0.5744481873667274,
"learning_rate": 6.424326382299394e-08,
"loss": 0.0569,
"step": 1390
},
{
"epoch": 2.95,
"grad_norm": 0.6568824061735796,
"learning_rate": 3.037184240325397e-08,
"loss": 0.0624,
"step": 1400
},
{
"epoch": 2.95,
"eval_loss": 0.34196367859840393,
"eval_runtime": 47.1898,
"eval_samples_per_second": 285.888,
"eval_steps_per_second": 8.943,
"step": 1400
},
{
"epoch": 2.97,
"grad_norm": 0.5396314216567086,
"learning_rate": 9.037536309636218e-09,
"loss": 0.0549,
"step": 1410
},
{
"epoch": 2.99,
"grad_norm": 0.6363488595941399,
"learning_rate": 2.5105738190867033e-10,
"loss": 0.0564,
"step": 1420
},
{
"epoch": 3.0,
"step": 1422,
"total_flos": 3.6498441825094205e+18,
"train_loss": 0.18191866144936128,
"train_runtime": 7053.867,
"train_samples_per_second": 51.639,
"train_steps_per_second": 0.202
}
],
"logging_steps": 10,
"max_steps": 1422,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 3.6498441825094205e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
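
A minimal sketch (not part of the uploaded files; the local path is assumed, the field names come straight from the JSON above) of how one might load this trainer_state.json and pull out the best evaluation loss using only the Python standard library:

import json

# Assumption: the file has been downloaded to the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes 10-step training entries ("loss") with 100-step eval entries ("eval_loss").
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("global_step:", state["global_step"], "epoch:", round(state["epoch"], 2))
print("last logged training loss:", train_log[-1]["loss"])

best = min(eval_log, key=lambda e: e["eval_loss"])
print("lowest eval_loss:", best["eval_loss"], "at step", best["step"])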