opt-350m-en-hy-instruct / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9958002799813346,
"global_step": 1605,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 4.081632653061225e-06,
"loss": 3.83,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 8.16326530612245e-06,
"loss": 3.7366,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 1.2244897959183674e-05,
"loss": 3.6074,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 1.63265306122449e-05,
"loss": 3.1722,
"step": 40
},
{
"epoch": 0.09,
"learning_rate": 1.9999979617830125e-05,
"loss": 3.1476,
"step": 50
},
{
"epoch": 0.11,
"learning_rate": 1.9997533857978314e-05,
"loss": 3.0026,
"step": 60
},
{
"epoch": 0.13,
"learning_rate": 1.999101280651531e-05,
"loss": 2.9246,
"step": 70
},
{
"epoch": 0.15,
"learning_rate": 1.99804191216153e-05,
"loss": 2.9575,
"step": 80
},
{
"epoch": 0.17,
"learning_rate": 1.996575712157875e-05,
"loss": 2.8573,
"step": 90
},
{
"epoch": 0.19,
"learning_rate": 1.9947032783072173e-05,
"loss": 2.8785,
"step": 100
},
{
"epoch": 0.21,
"learning_rate": 1.9924253738691836e-05,
"loss": 2.551,
"step": 110
},
{
"epoch": 0.22,
"learning_rate": 1.98974292738525e-05,
"loss": 2.6737,
"step": 120
},
{
"epoch": 0.24,
"learning_rate": 1.9866570323002413e-05,
"loss": 2.7509,
"step": 130
},
{
"epoch": 0.26,
"learning_rate": 1.983168946516609e-05,
"loss": 2.5926,
"step": 140
},
{
"epoch": 0.28,
"learning_rate": 1.9792800918816745e-05,
"loss": 2.5292,
"step": 150
},
{
"epoch": 0.3,
"learning_rate": 1.9749920536080403e-05,
"loss": 2.6191,
"step": 160
},
{
"epoch": 0.32,
"learning_rate": 1.9703065796274124e-05,
"loss": 2.6252,
"step": 170
},
{
"epoch": 0.34,
"learning_rate": 1.965225579878089e-05,
"loss": 2.4386,
"step": 180
},
{
"epoch": 0.35,
"learning_rate": 1.959751125526415e-05,
"loss": 2.5224,
"step": 190
},
{
"epoch": 0.37,
"learning_rate": 1.9538854481225114e-05,
"loss": 2.4643,
"step": 200
},
{
"epoch": 0.39,
"learning_rate": 1.9476309386906303e-05,
"loss": 2.3863,
"step": 210
},
{
"epoch": 0.41,
"learning_rate": 1.9409901467545038e-05,
"loss": 2.331,
"step": 220
},
{
"epoch": 0.43,
"learning_rate": 1.933965779298078e-05,
"loss": 2.2992,
"step": 230
},
{
"epoch": 0.45,
"learning_rate": 1.9265606996620708e-05,
"loss": 2.4181,
"step": 240
},
{
"epoch": 0.47,
"learning_rate": 1.9187779263767875e-05,
"loss": 2.3913,
"step": 250
},
{
"epoch": 0.49,
"learning_rate": 1.9106206319316757e-05,
"loss": 2.3832,
"step": 260
},
{
"epoch": 0.5,
"learning_rate": 1.9020921414821294e-05,
"loss": 2.4269,
"step": 270
},
{
"epoch": 0.52,
"learning_rate": 1.8931959314940523e-05,
"loss": 2.2566,
"step": 280
},
{
"epoch": 0.54,
"learning_rate": 1.88393562832675e-05,
"loss": 2.316,
"step": 290
},
{
"epoch": 0.56,
"learning_rate": 1.8743150067547168e-05,
"loss": 2.2473,
"step": 300
},
{
"epoch": 0.58,
"learning_rate": 1.8643379884289266e-05,
"loss": 2.3063,
"step": 310
},
{
"epoch": 0.6,
"learning_rate": 1.85400864027825e-05,
"loss": 2.187,
"step": 320
},
{
"epoch": 0.62,
"learning_rate": 1.8433311728516553e-05,
"loss": 2.141,
"step": 330
},
{
"epoch": 0.63,
"learning_rate": 1.832309938601862e-05,
"loss": 2.2089,
"step": 340
},
{
"epoch": 0.65,
"learning_rate": 1.8209494301111535e-05,
"loss": 2.2493,
"step": 350
},
{
"epoch": 0.67,
"learning_rate": 1.809254278260066e-05,
"loss": 2.2365,
"step": 360
},
{
"epoch": 0.69,
"learning_rate": 1.7972292503397036e-05,
"loss": 2.2406,
"step": 370
},
{
"epoch": 0.71,
"learning_rate": 1.7848792481084504e-05,
"loss": 2.1423,
"step": 380
},
{
"epoch": 0.73,
"learning_rate": 1.772209305793868e-05,
"loss": 2.1684,
"step": 390
},
{
"epoch": 0.75,
"learning_rate": 1.7592245880405934e-05,
"loss": 2.1613,
"step": 400
},
{
"epoch": 0.77,
"learning_rate": 1.7459303878050788e-05,
"loss": 2.1061,
"step": 410
},
{
"epoch": 0.78,
"learning_rate": 1.732332124198023e-05,
"loss": 2.1551,
"step": 420
},
{
"epoch": 0.8,
"learning_rate": 1.7184353402753832e-05,
"loss": 2.1107,
"step": 430
},
{
"epoch": 0.82,
"learning_rate": 1.7042457007788594e-05,
"loss": 2.2264,
"step": 440
},
{
"epoch": 0.84,
"learning_rate": 1.6897689898267774e-05,
"loss": 2.0884,
"step": 450
},
{
"epoch": 0.86,
"learning_rate": 1.6750111085563122e-05,
"loss": 2.1016,
"step": 460
},
{
"epoch": 0.88,
"learning_rate": 1.659978072718007e-05,
"loss": 2.138,
"step": 470
},
{
"epoch": 0.9,
"learning_rate": 1.644676010223576e-05,
"loss": 2.0713,
"step": 480
},
{
"epoch": 0.91,
"learning_rate": 1.6291111586479857e-05,
"loss": 2.1247,
"step": 490
},
{
"epoch": 0.93,
"learning_rate": 1.6132898626868336e-05,
"loss": 2.0748,
"step": 500
},
{
"epoch": 0.95,
"learning_rate": 1.5972185715700623e-05,
"loss": 2.0356,
"step": 510
},
{
"epoch": 0.97,
"learning_rate": 1.5809038364330617e-05,
"loss": 2.1173,
"step": 520
},
{
"epoch": 0.99,
"learning_rate": 1.5643523076462325e-05,
"loss": 2.1065,
"step": 530
},
{
"epoch": 1.01,
"learning_rate": 1.547570732104095e-05,
"loss": 2.1073,
"step": 540
},
{
"epoch": 1.03,
"learning_rate": 1.5305659504750595e-05,
"loss": 1.9632,
"step": 550
},
{
"epoch": 1.05,
"learning_rate": 1.5133448944129616e-05,
"loss": 2.0967,
"step": 560
},
{
"epoch": 1.06,
"learning_rate": 1.4959145837315182e-05,
"loss": 1.9325,
"step": 570
},
{
"epoch": 1.08,
"learning_rate": 1.4782821235428406e-05,
"loss": 2.0018,
"step": 580
},
{
"epoch": 1.1,
"learning_rate": 1.4604547013611813e-05,
"loss": 1.9439,
"step": 590
},
{
"epoch": 1.12,
"learning_rate": 1.4424395841730889e-05,
"loss": 1.9066,
"step": 600
},
{
"epoch": 1.14,
"learning_rate": 1.4242441154751691e-05,
"loss": 1.8828,
"step": 610
},
{
"epoch": 1.16,
"learning_rate": 1.4058757122806573e-05,
"loss": 1.9665,
"step": 620
},
{
"epoch": 1.18,
"learning_rate": 1.3873418620960237e-05,
"loss": 1.9528,
"step": 630
},
{
"epoch": 1.19,
"learning_rate": 1.3686501198688435e-05,
"loss": 1.9044,
"step": 640
},
{
"epoch": 1.21,
"learning_rate": 1.3498081049081773e-05,
"loss": 1.8582,
"step": 650
},
{
"epoch": 1.23,
"learning_rate": 1.330823497778711e-05,
"loss": 2.0053,
"step": 660
},
{
"epoch": 1.25,
"learning_rate": 1.3117040371699334e-05,
"loss": 1.9191,
"step": 670
},
{
"epoch": 1.27,
"learning_rate": 1.2924575167416147e-05,
"loss": 1.9297,
"step": 680
},
{
"epoch": 1.29,
"learning_rate": 1.2730917819468796e-05,
"loss": 1.8852,
"step": 690
},
{
"epoch": 1.31,
"learning_rate": 1.2536147268341676e-05,
"loss": 1.9083,
"step": 700
},
{
"epoch": 1.33,
"learning_rate": 1.2340342908293856e-05,
"loss": 1.8909,
"step": 710
},
{
"epoch": 1.34,
"learning_rate": 1.2143584554995604e-05,
"loss": 1.8984,
"step": 720
},
{
"epoch": 1.36,
"learning_rate": 1.1945952412993162e-05,
"loss": 1.9207,
"step": 730
},
{
"epoch": 1.38,
"learning_rate": 1.1747527043014997e-05,
"loss": 1.8663,
"step": 740
},
{
"epoch": 1.4,
"learning_rate": 1.1548389329132832e-05,
"loss": 1.9107,
"step": 750
},
{
"epoch": 1.42,
"learning_rate": 1.1348620445790926e-05,
"loss": 1.9066,
"step": 760
},
{
"epoch": 1.44,
"learning_rate": 1.114830182471695e-05,
"loss": 1.8737,
"step": 770
},
{
"epoch": 1.46,
"learning_rate": 1.0947515121727987e-05,
"loss": 1.935,
"step": 780
},
{
"epoch": 1.47,
"learning_rate": 1.0746342183445209e-05,
"loss": 1.8357,
"step": 790
},
{
"epoch": 1.49,
"learning_rate": 1.0544865013930763e-05,
"loss": 1.8526,
"step": 800
},
{
"epoch": 1.51,
"learning_rate": 1.0343165741260474e-05,
"loss": 1.9319,
"step": 810
},
{
"epoch": 1.53,
"learning_rate": 1.0141326584046012e-05,
"loss": 1.9127,
"step": 820
},
{
"epoch": 1.55,
"learning_rate": 9.93942981792013e-06,
"loss": 1.906,
"step": 830
},
{
"epoch": 1.57,
"learning_rate": 9.737557741998696e-06,
"loss": 1.7907,
"step": 840
},
{
"epoch": 1.59,
"learning_rate": 9.535792645333114e-06,
"loss": 1.8328,
"step": 850
},
{
"epoch": 1.61,
"learning_rate": 9.334216773366863e-06,
"loss": 1.8959,
"step": 860
},
{
"epoch": 1.62,
"learning_rate": 9.132912294409814e-06,
"loss": 1.8743,
"step": 870
},
{
"epoch": 1.64,
"learning_rate": 8.931961266144003e-06,
"loss": 1.7924,
"step": 880
},
{
"epoch": 1.66,
"learning_rate": 8.731445602174463e-06,
"loss": 1.8917,
"step": 890
},
{
"epoch": 1.68,
"learning_rate": 8.531447038638812e-06,
"loss": 1.8711,
"step": 900
},
{
"epoch": 1.7,
"learning_rate": 8.33204710088918e-06,
"loss": 1.8876,
"step": 910
},
{
"epoch": 1.72,
"learning_rate": 8.133327070260064e-06,
"loss": 1.8171,
"step": 920
},
{
"epoch": 1.74,
"learning_rate": 7.935367950935623e-06,
"loss": 1.84,
"step": 930
},
{
"epoch": 1.75,
"learning_rate": 7.738250436929977e-06,
"loss": 1.9742,
"step": 940
},
{
"epoch": 1.77,
"learning_rate": 7.5420548791939406e-06,
"loss": 1.8994,
"step": 950
},
{
"epoch": 1.79,
"learning_rate": 7.346861252861577e-06,
"loss": 1.886,
"step": 960
},
{
"epoch": 1.81,
"learning_rate": 7.152749124649961e-06,
"loss": 1.8094,
"step": 970
},
{
"epoch": 1.83,
"learning_rate": 6.959797620425435e-06,
"loss": 1.8344,
"step": 980
},
{
"epoch": 1.85,
"learning_rate": 6.768085392949527e-06,
"loss": 1.8566,
"step": 990
},
{
"epoch": 1.87,
"learning_rate": 6.5776905898177936e-06,
"loss": 1.7879,
"step": 1000
},
{
"epoch": 1.89,
"learning_rate": 6.388690821604505e-06,
"loss": 1.8543,
"step": 1010
},
{
"epoch": 1.9,
"learning_rate": 6.20116313022628e-06,
"loss": 1.8772,
"step": 1020
},
{
"epoch": 1.92,
"learning_rate": 6.015183957537523e-06,
"loss": 1.8362,
"step": 1030
},
{
"epoch": 1.94,
"learning_rate": 5.830829114170442e-06,
"loss": 1.8778,
"step": 1040
},
{
"epoch": 1.96,
"learning_rate": 5.6481737486323936e-06,
"loss": 1.8676,
"step": 1050
},
{
"epoch": 1.98,
"learning_rate": 5.467292316673114e-06,
"loss": 1.8362,
"step": 1060
},
{
"epoch": 2.0,
"learning_rate": 5.288258550934343e-06,
"loss": 1.8752,
"step": 1070
},
{
"epoch": 2.02,
"learning_rate": 5.111145430894229e-06,
"loss": 1.9466,
"step": 1080
},
{
"epoch": 2.03,
"learning_rate": 4.936025153118697e-06,
"loss": 1.7668,
"step": 1090
},
{
"epoch": 2.05,
"learning_rate": 4.762969101832011e-06,
"loss": 1.7815,
"step": 1100
},
{
"epoch": 2.07,
"learning_rate": 4.59204781981843e-06,
"loss": 1.72,
"step": 1110
},
{
"epoch": 2.09,
"learning_rate": 4.423330979666878e-06,
"loss": 1.7548,
"step": 1120
},
{
"epoch": 2.11,
"learning_rate": 4.256887355370333e-06,
"loss": 1.6771,
"step": 1130
},
{
"epoch": 2.13,
"learning_rate": 4.092784794291502e-06,
"loss": 1.8401,
"step": 1140
},
{
"epoch": 2.15,
"learning_rate": 3.931090189506222e-06,
"loss": 1.7071,
"step": 1150
},
{
"epoch": 2.17,
"learning_rate": 3.7718694525358534e-06,
"loss": 1.8095,
"step": 1160
},
{
"epoch": 2.18,
"learning_rate": 3.615187486479794e-06,
"loss": 1.7441,
"step": 1170
},
{
"epoch": 2.2,
"learning_rate": 3.4611081595590445e-06,
"loss": 1.7304,
"step": 1180
},
{
"epoch": 2.22,
"learning_rate": 3.30969427908162e-06,
"loss": 1.7527,
"step": 1190
},
{
"epoch": 2.24,
"learning_rate": 3.1610075658404436e-06,
"loss": 1.8059,
"step": 1200
},
{
"epoch": 2.26,
"learning_rate": 3.0151086289540997e-06,
"loss": 1.6814,
"step": 1210
},
{
"epoch": 2.28,
"learning_rate": 2.872056941160767e-06,
"loss": 1.7992,
"step": 1220
},
{
"epoch": 2.3,
"learning_rate": 2.7319108145753625e-06,
"loss": 1.6453,
"step": 1230
},
{
"epoch": 2.31,
"learning_rate": 2.594727376919798e-06,
"loss": 1.6889,
"step": 1240
},
{
"epoch": 2.33,
"learning_rate": 2.460562548236015e-06,
"loss": 1.7476,
"step": 1250
},
{
"epoch": 2.35,
"learning_rate": 2.329471018091323e-06,
"loss": 1.7296,
"step": 1260
},
{
"epoch": 2.37,
"learning_rate": 2.201506223285306e-06,
"loss": 1.8352,
"step": 1270
},
{
"epoch": 2.39,
"learning_rate": 2.0767203260673952e-06,
"loss": 1.718,
"step": 1280
},
{
"epoch": 2.41,
"learning_rate": 1.9551641928739886e-06,
"loss": 1.7623,
"step": 1290
},
{
"epoch": 2.43,
"learning_rate": 1.836887373593782e-06,
"loss": 1.7442,
"step": 1300
},
{
"epoch": 2.45,
"learning_rate": 1.7219380813697605e-06,
"loss": 1.7989,
"step": 1310
},
{
"epoch": 2.46,
"learning_rate": 1.6103631729460978e-06,
"loss": 1.7406,
"step": 1320
},
{
"epoch": 2.48,
"learning_rate": 1.5022081295679436e-06,
"loss": 1.7743,
"step": 1330
},
{
"epoch": 2.5,
"learning_rate": 1.397517038441929e-06,
"loss": 1.7766,
"step": 1340
},
{
"epoch": 2.52,
"learning_rate": 1.296332574764907e-06,
"loss": 1.7424,
"step": 1350
},
{
"epoch": 2.54,
"learning_rate": 1.1986959843282753e-06,
"loss": 1.7627,
"step": 1360
},
{
"epoch": 2.56,
"learning_rate": 1.104647066704967e-06,
"loss": 1.7274,
"step": 1370
},
{
"epoch": 2.58,
"learning_rate": 1.0142241590259715e-06,
"loss": 1.713,
"step": 1380
},
{
"epoch": 2.59,
"learning_rate": 9.274641203529655e-07,
"loss": 1.8669,
"step": 1390
},
{
"epoch": 2.61,
"learning_rate": 8.444023166534798e-07,
"loss": 1.7232,
"step": 1400
},
{
"epoch": 2.63,
"learning_rate": 7.650726063846703e-07,
"loss": 1.7222,
"step": 1410
},
{
"epoch": 2.65,
"learning_rate": 6.895073266916164e-07,
"loss": 1.7192,
"step": 1420
},
{
"epoch": 2.67,
"learning_rate": 6.177372802257308e-07,
"loss": 1.7451,
"step": 1430
},
{
"epoch": 2.69,
"learning_rate": 5.497917225886918e-07,
"loss": 1.7798,
"step": 1440
},
{
"epoch": 2.71,
"learning_rate": 4.856983504069857e-07,
"loss": 1.7966,
"step": 1450
},
{
"epoch": 2.73,
"learning_rate": 4.254832900419437e-07,
"loss": 1.7129,
"step": 1460
},
{
"epoch": 2.74,
"learning_rate": 3.6917108693985684e-07,
"loss": 1.6862,
"step": 1470
},
{
"epoch": 2.76,
"learning_rate": 3.1678469562652856e-07,
"loss": 1.7033,
"step": 1480
},
{
"epoch": 2.78,
"learning_rate": 2.6834547035032634e-07,
"loss": 1.7667,
"step": 1490
},
{
"epoch": 2.8,
"learning_rate": 2.2387315637755535e-07,
"loss": 1.7763,
"step": 1500
},
{
"epoch": 2.82,
"learning_rate": 1.8338588194370176e-07,
"loss": 1.7684,
"step": 1510
},
{
"epoch": 2.84,
"learning_rate": 1.4690015086382793e-07,
"loss": 1.7335,
"step": 1520
},
{
"epoch": 2.86,
"learning_rate": 1.1443083580512382e-07,
"loss": 1.7949,
"step": 1530
},
{
"epoch": 2.87,
"learning_rate": 8.599117222436892e-08,
"loss": 1.7364,
"step": 1540
},
{
"epoch": 2.89,
"learning_rate": 6.159275297276557e-08,
"loss": 1.8143,
"step": 1550
},
{
"epoch": 2.91,
"learning_rate": 4.124552357035239e-08,
"loss": 1.68,
"step": 1560
},
{
"epoch": 2.93,
"learning_rate": 2.4957778151909472e-08,
"loss": 1.7381,
"step": 1570
},
{
"epoch": 2.95,
"learning_rate": 1.2736156086026186e-08,
"loss": 1.7679,
"step": 1580
},
{
"epoch": 2.97,
"learning_rate": 4.58563926869604e-09,
"loss": 1.7197,
"step": 1590
},
{
"epoch": 2.99,
"learning_rate": 5.095500925511588e-10,
"loss": 1.7629,
"step": 1600
},
{
"epoch": 3.0,
"step": 1605,
"total_flos": 1.1751074904303206e+17,
"train_loss": 2.0375683680501684,
"train_runtime": 2895.4272,
"train_samples_per_second": 62.165,
"train_steps_per_second": 0.554
}
],
"max_steps": 1605,
"num_train_epochs": 3,
"total_flos": 1.1751074904303206e+17,
"trial_name": null,
"trial_params": null
}
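
For reference, the loss values logged in "log_history" above can be visualized directly. Below is a minimal sketch, assuming the JSON is saved locally as trainer_state.json and matplotlib is available; the filename and the output path loss_curve.png are illustrative, not part of the original checkpoint.

# Plot training loss against global step from a Hugging Face trainer_state.json.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step log entries; the final summary entry records
# "train_loss"/"train_runtime" instead of a per-step "loss".
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("opt-350m-en-hy-instruct training loss")
plt.savefig("loss_curve.png")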