Yi-6B-ruozhiba-5e-4-50 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 4.5454545454545455e-06,
"loss": 2.3833,
"step": 1
},
{
"epoch": 0.07,
"learning_rate": 1.8181818181818182e-05,
"loss": 2.4734,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.2655,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 5.4545454545454546e-05,
"loss": 2.2091,
"step": 12
},
{
"epoch": 0.29,
"learning_rate": 7.272727272727273e-05,
"loss": 2.1358,
"step": 16
},
{
"epoch": 0.36,
"learning_rate": 9.090909090909092e-05,
"loss": 2.0997,
"step": 20
},
{
"epoch": 0.44,
"learning_rate": 0.00010909090909090909,
"loss": 1.931,
"step": 24
},
{
"epoch": 0.51,
"learning_rate": 0.00012727272727272725,
"loss": 2.0453,
"step": 28
},
{
"epoch": 0.58,
"learning_rate": 0.00014545454545454546,
"loss": 1.9392,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 0.00016363636363636363,
"loss": 1.8909,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 0.00018181818181818183,
"loss": 1.7724,
"step": 40
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 1.854,
"step": 44
},
{
"epoch": 0.87,
"learning_rate": 0.00021818181818181818,
"loss": 1.9588,
"step": 48
},
{
"epoch": 0.95,
"learning_rate": 0.00023636363636363636,
"loss": 1.8768,
"step": 52
},
{
"epoch": 1.0,
"gpt4_scores": 0.67,
"step": 55
},
{
"epoch": 1.0,
"std": 0.12413702106946178,
"step": 55
},
{
"epoch": 1.0,
"eval_loss": 1.84342360496521,
"eval_runtime": 4.9467,
"eval_samples_per_second": 4.65,
"eval_steps_per_second": 1.213,
"step": 55
},
{
"epoch": 1.02,
"learning_rate": 0.0002545454545454545,
"loss": 1.7451,
"step": 56
},
{
"epoch": 1.09,
"learning_rate": 0.00027272727272727274,
"loss": 1.7394,
"step": 60
},
{
"epoch": 1.16,
"learning_rate": 0.0002909090909090909,
"loss": 1.6971,
"step": 64
},
{
"epoch": 1.24,
"learning_rate": 0.0003090909090909091,
"loss": 1.6419,
"step": 68
},
{
"epoch": 1.31,
"learning_rate": 0.00032727272727272726,
"loss": 1.65,
"step": 72
},
{
"epoch": 1.38,
"learning_rate": 0.00034545454545454544,
"loss": 1.594,
"step": 76
},
{
"epoch": 1.45,
"learning_rate": 0.00036363636363636367,
"loss": 1.5985,
"step": 80
},
{
"epoch": 1.53,
"learning_rate": 0.00038181818181818184,
"loss": 1.719,
"step": 84
},
{
"epoch": 1.6,
"learning_rate": 0.0004,
"loss": 1.655,
"step": 88
},
{
"epoch": 1.67,
"learning_rate": 0.00041818181818181814,
"loss": 1.6052,
"step": 92
},
{
"epoch": 1.75,
"learning_rate": 0.00043636363636363637,
"loss": 1.6288,
"step": 96
},
{
"epoch": 1.82,
"learning_rate": 0.00045454545454545455,
"loss": 1.6272,
"step": 100
},
{
"epoch": 1.89,
"learning_rate": 0.0004727272727272727,
"loss": 1.6124,
"step": 104
},
{
"epoch": 1.96,
"learning_rate": 0.0004909090909090909,
"loss": 1.5109,
"step": 108
},
{
"epoch": 2.0,
"gpt4_scores": 0.675,
"step": 110
},
{
"epoch": 2.0,
"std": 0.11688669727560959,
"step": 110
},
{
"epoch": 2.0,
"eval_loss": 1.89065682888031,
"eval_runtime": 4.9546,
"eval_samples_per_second": 4.642,
"eval_steps_per_second": 1.211,
"step": 110
},
{
"epoch": 2.04,
"learning_rate": 0.0004999949650182266,
"loss": 1.3323,
"step": 112
},
{
"epoch": 2.11,
"learning_rate": 0.0004999546863808815,
"loss": 1.0715,
"step": 116
},
{
"epoch": 2.18,
"learning_rate": 0.0004998741355957963,
"loss": 0.8812,
"step": 120
},
{
"epoch": 2.25,
"learning_rate": 0.0004997533256411359,
"loss": 0.9464,
"step": 124
},
{
"epoch": 2.33,
"learning_rate": 0.0004995922759815339,
"loss": 0.9049,
"step": 128
},
{
"epoch": 2.4,
"learning_rate": 0.0004993910125649561,
"loss": 0.9246,
"step": 132
},
{
"epoch": 2.47,
"learning_rate": 0.0004991495678185201,
"loss": 1.0853,
"step": 136
},
{
"epoch": 2.55,
"learning_rate": 0.0004988679806432712,
"loss": 1.1289,
"step": 140
},
{
"epoch": 2.62,
"learning_rate": 0.0004985462964079136,
"loss": 0.9598,
"step": 144
},
{
"epoch": 2.69,
"learning_rate": 0.0004981845669415021,
"loss": 0.8825,
"step": 148
},
{
"epoch": 2.76,
"learning_rate": 0.0004977828505250904,
"loss": 0.985,
"step": 152
},
{
"epoch": 2.84,
"learning_rate": 0.0004973412118823412,
"loss": 0.9869,
"step": 156
},
{
"epoch": 2.91,
"learning_rate": 0.0004968597221690986,
"loss": 0.985,
"step": 160
},
{
"epoch": 2.98,
"learning_rate": 0.0004963384589619233,
"loss": 1.1215,
"step": 164
},
{
"epoch": 3.0,
"gpt4_scores": 0.67,
"step": 165
},
{
"epoch": 3.0,
"std": 0.11920570456148481,
"step": 165
},
{
"epoch": 3.0,
"eval_loss": 2.164942741394043,
"eval_runtime": 4.9498,
"eval_samples_per_second": 4.647,
"eval_steps_per_second": 1.212,
"step": 165
},
{
"epoch": 3.05,
"learning_rate": 0.0004957775062455933,
"loss": 0.8362,
"step": 168
},
{
"epoch": 3.13,
"learning_rate": 0.0004951769543995731,
"loss": 0.3812,
"step": 172
},
{
"epoch": 3.2,
"learning_rate": 0.0004945369001834514,
"loss": 0.4813,
"step": 176
},
{
"epoch": 3.27,
"learning_rate": 0.0004938574467213517,
"loss": 0.4693,
"step": 180
},
{
"epoch": 3.35,
"learning_rate": 0.0004931387034853173,
"loss": 0.447,
"step": 184
},
{
"epoch": 3.42,
"learning_rate": 0.0004923807862776728,
"loss": 0.6197,
"step": 188
},
{
"epoch": 3.49,
"learning_rate": 0.0004915838172123671,
"loss": 0.5158,
"step": 192
},
{
"epoch": 3.56,
"learning_rate": 0.0004907479246952981,
"loss": 0.5785,
"step": 196
},
{
"epoch": 3.64,
"learning_rate": 0.0004898732434036243,
"loss": 0.5258,
"step": 200
},
{
"epoch": 3.71,
"learning_rate": 0.0004889599142640663,
"loss": 0.4577,
"step": 204
},
{
"epoch": 3.78,
"learning_rate": 0.0004880080844302004,
"loss": 0.5965,
"step": 208
},
{
"epoch": 3.85,
"learning_rate": 0.0004870179072587499,
"loss": 0.5373,
"step": 212
},
{
"epoch": 3.93,
"learning_rate": 0.0004859895422848767,
"loss": 0.4731,
"step": 216
},
{
"epoch": 4.0,
"learning_rate": 0.0004849231551964771,
"loss": 0.4876,
"step": 220
},
{
"epoch": 4.0,
"gpt4_scores": 0.61,
"step": 220
},
{
"epoch": 4.0,
"std": 0.12445882853377657,
"step": 220
},
{
"epoch": 4.0,
"eval_loss": 2.6900253295898438,
"eval_runtime": 4.9381,
"eval_samples_per_second": 4.658,
"eval_steps_per_second": 1.215,
"step": 220
},
{
"epoch": 4.07,
"learning_rate": 0.00048381891780748665,
"loss": 0.288,
"step": 224
},
{
"epoch": 4.15,
"learning_rate": 0.00048267700803019775,
"loss": 0.2556,
"step": 228
},
{
"epoch": 4.22,
"learning_rate": 0.0004814976098465951,
"loss": 0.2897,
"step": 232
},
{
"epoch": 4.29,
"learning_rate": 0.00048028091327871256,
"loss": 0.2375,
"step": 236
},
{
"epoch": 4.36,
"learning_rate": 0.0004790271143580174,
"loss": 0.2367,
"step": 240
},
{
"epoch": 4.44,
"learning_rate": 0.00047773641509382626,
"loss": 0.2878,
"step": 244
},
{
"epoch": 4.51,
"learning_rate": 0.0004764090234407577,
"loss": 0.3138,
"step": 248
},
{
"epoch": 4.58,
"learning_rate": 0.00047504515326522696,
"loss": 0.277,
"step": 252
},
{
"epoch": 4.65,
"learning_rate": 0.0004736450243109884,
"loss": 0.2624,
"step": 256
},
{
"epoch": 4.73,
"learning_rate": 0.0004722088621637309,
"loss": 0.2888,
"step": 260
},
{
"epoch": 4.8,
"learning_rate": 0.00047073689821473173,
"loss": 0.2256,
"step": 264
},
{
"epoch": 4.87,
"learning_rate": 0.00046922936962357577,
"loss": 0.2243,
"step": 268
},
{
"epoch": 4.95,
"learning_rate": 0.00046768651927994433,
"loss": 0.2916,
"step": 272
},
{
"epoch": 5.0,
"gpt4_scores": 0.63,
"step": 275
},
{
"epoch": 5.0,
"std": 0.12573782247199924,
"step": 275
},
{
"epoch": 5.0,
"eval_loss": 2.821211099624634,
"eval_runtime": 4.9351,
"eval_samples_per_second": 4.661,
"eval_steps_per_second": 1.216,
"step": 275
},
{
"epoch": 5.02,
"learning_rate": 0.0004661085957644817,
"loss": 0.233,
"step": 276
},
{
"epoch": 5.09,
"learning_rate": 0.0004644958533087443,
"loss": 0.1537,
"step": 280
},
{
"epoch": 5.16,
"learning_rate": 0.0004628485517542392,
"loss": 0.1743,
"step": 284
},
{
"epoch": 5.24,
"learning_rate": 0.0004611669565105596,
"loss": 0.1827,
"step": 288
},
{
"epoch": 5.31,
"learning_rate": 0.00045945133851262184,
"loss": 0.1928,
"step": 292
},
{
"epoch": 5.38,
"learning_rate": 0.00045770197417701366,
"loss": 0.1718,
"step": 296
},
{
"epoch": 5.45,
"learning_rate": 0.0004559191453574582,
"loss": 0.166,
"step": 300
},
{
"epoch": 5.53,
"learning_rate": 0.00045410313929940244,
"loss": 0.1484,
"step": 304
},
{
"epoch": 5.6,
"learning_rate": 0.0004522542485937369,
"loss": 0.1521,
"step": 308
},
{
"epoch": 5.67,
"learning_rate": 0.00045037277112965383,
"loss": 0.195,
"step": 312
},
{
"epoch": 5.75,
"learning_rate": 0.0004484590100466523,
"loss": 0.153,
"step": 316
},
{
"epoch": 5.82,
"learning_rate": 0.0004465132736856969,
"loss": 0.1751,
"step": 320
},
{
"epoch": 5.89,
"learning_rate": 0.0004445358755395382,
"loss": 0.2153,
"step": 324
},
{
"epoch": 5.96,
"learning_rate": 0.00044252713420220394,
"loss": 0.1983,
"step": 328
},
{
"epoch": 6.0,
"gpt4_scores": 0.7500000000000001,
"step": 330
},
{
"epoch": 6.0,
"std": 0.0972111104761179,
"step": 330
},
{
"epoch": 6.0,
"eval_loss": 2.8983585834503174,
"eval_runtime": 4.9684,
"eval_samples_per_second": 4.629,
"eval_steps_per_second": 1.208,
"step": 330
},
{
"epoch": 6.04,
"learning_rate": 0.0004404873733176677,
"loss": 0.1353,
"step": 332
},
{
"epoch": 6.11,
"learning_rate": 0.00043841692152770415,
"loss": 0.102,
"step": 336
},
{
"epoch": 6.18,
"learning_rate": 0.0004363161124189387,
"loss": 0.1055,
"step": 340
},
{
"epoch": 6.25,
"learning_rate": 0.00043418528446910123,
"loss": 0.0998,
"step": 344
},
{
"epoch": 6.33,
"learning_rate": 0.00043202478099249104,
"loss": 0.1168,
"step": 348
},
{
"epoch": 6.4,
"learning_rate": 0.0004298349500846628,
"loss": 0.1156,
"step": 352
},
{
"epoch": 6.47,
"learning_rate": 0.00042761614456634226,
"loss": 0.1345,
"step": 356
},
{
"epoch": 6.55,
"learning_rate": 0.00042536872192658034,
"loss": 0.0968,
"step": 360
},
{
"epoch": 6.62,
"learning_rate": 0.0004230930442651557,
"loss": 0.1179,
"step": 364
},
{
"epoch": 6.69,
"learning_rate": 0.00042078947823423365,
"loss": 0.0959,
"step": 368
},
{
"epoch": 6.76,
"learning_rate": 0.00041845839497929203,
"loss": 0.1078,
"step": 372
},
{
"epoch": 6.84,
"learning_rate": 0.0004161001700793231,
"loss": 0.1175,
"step": 376
},
{
"epoch": 6.91,
"learning_rate": 0.0004137151834863213,
"loss": 0.1095,
"step": 380
},
{
"epoch": 6.98,
"learning_rate": 0.00041130381946406574,
"loss": 0.1199,
"step": 384
},
{
"epoch": 7.0,
"gpt4_scores": 0.49000000000000005,
"step": 385
},
{
"epoch": 7.0,
"std": 0.12525973016097391,
"step": 385
},
{
"epoch": 7.0,
"eval_loss": 2.949794292449951,
"eval_runtime": 4.9646,
"eval_samples_per_second": 4.633,
"eval_steps_per_second": 1.209,
"step": 385
},
{
"epoch": 7.05,
"learning_rate": 0.0004088664665262091,
"loss": 0.076,
"step": 388
},
{
"epoch": 7.13,
"learning_rate": 0.0004064035173736804,
"loss": 0.0709,
"step": 392
},
{
"epoch": 7.2,
"learning_rate": 0.00040391536883141455,
"loss": 0.073,
"step": 396
},
{
"epoch": 7.27,
"learning_rate": 0.00040140242178441667,
"loss": 0.0819,
"step": 400
},
{
"epoch": 7.35,
"learning_rate": 0.000398865081113172,
"loss": 0.0643,
"step": 404
},
{
"epoch": 7.42,
"learning_rate": 0.0003963037556284129,
"loss": 0.1603,
"step": 408
},
{
"epoch": 7.49,
"learning_rate": 0.0003937188580052518,
"loss": 0.119,
"step": 412
},
{
"epoch": 7.56,
"learning_rate": 0.0003911108047166924,
"loss": 0.0754,
"step": 416
},
{
"epoch": 7.64,
"learning_rate": 0.0003884800159665276,
"loss": 0.0622,
"step": 420
},
{
"epoch": 7.71,
"learning_rate": 0.00038582691562163827,
"loss": 0.0711,
"step": 424
},
{
"epoch": 7.78,
"learning_rate": 0.00038315193114369994,
"loss": 0.0643,
"step": 428
},
{
"epoch": 7.85,
"learning_rate": 0.0003804554935203115,
"loss": 0.0672,
"step": 432
},
{
"epoch": 7.93,
"learning_rate": 0.00037773803719555514,
"loss": 0.0688,
"step": 436
},
{
"epoch": 8.0,
"learning_rate": 0.000375,
"loss": 0.0743,
"step": 440
},
{
"epoch": 8.0,
"gpt4_scores": 0.51,
"step": 440
},
{
"epoch": 8.0,
"std": 0.12763228431709586,
"step": 440
},
{
"epoch": 8.0,
"eval_loss": 3.250723123550415,
"eval_runtime": 4.9396,
"eval_samples_per_second": 4.656,
"eval_steps_per_second": 1.215,
"step": 440
},
{
"epoch": 8.07,
"learning_rate": 0.00037224182308015974,
"loss": 0.0528,
"step": 444
},
{
"epoch": 8.15,
"learning_rate": 0.0003694639508274158,
"loss": 0.0502,
"step": 448
},
{
"epoch": 8.22,
"learning_rate": 0.00036666683080641843,
"loss": 0.0659,
"step": 452
},
{
"epoch": 8.29,
"learning_rate": 0.0003638509136829758,
"loss": 0.065,
"step": 456
},
{
"epoch": 8.36,
"learning_rate": 0.00036101665315144355,
"loss": 0.0524,
"step": 460
},
{
"epoch": 8.44,
"learning_rate": 0.00035816450586162706,
"loss": 0.0565,
"step": 464
},
{
"epoch": 8.51,
"learning_rate": 0.00035529493134520666,
"loss": 0.0432,
"step": 468
},
{
"epoch": 8.58,
"learning_rate": 0.00035240839194169884,
"loss": 0.0582,
"step": 472
},
{
"epoch": 8.65,
"learning_rate": 0.0003495053527239656,
"loss": 0.0507,
"step": 476
},
{
"epoch": 8.73,
"learning_rate": 0.00034658628142328216,
"loss": 0.0571,
"step": 480
},
{
"epoch": 8.8,
"learning_rate": 0.00034365164835397803,
"loss": 0.0925,
"step": 484
},
{
"epoch": 8.87,
"learning_rate": 0.00034070192633766023,
"loss": 0.0601,
"step": 488
},
{
"epoch": 8.95,
"learning_rate": 0.00033773759062703394,
"loss": 0.062,
"step": 492
},
{
"epoch": 9.0,
"gpt4_scores": 0.53,
"step": 495
},
{
"epoch": 9.0,
"std": 0.1192057045614848,
"step": 495
},
{
"epoch": 9.0,
"eval_loss": 3.1474077701568604,
"eval_runtime": 4.9704,
"eval_samples_per_second": 4.627,
"eval_steps_per_second": 1.207,
"step": 495
},
{
"epoch": 9.02,
"learning_rate": 0.0003347591188293301,
"loss": 0.0535,
"step": 496
},
{
"epoch": 9.09,
"learning_rate": 0.00033176699082935546,
"loss": 0.0484,
"step": 500
},
{
"epoch": 9.16,
"learning_rate": 0.00032876168871217323,
"loss": 0.0541,
"step": 504
},
{
"epoch": 9.24,
"learning_rate": 0.00032574369668543187,
"loss": 0.0438,
"step": 508
},
{
"epoch": 9.31,
"learning_rate": 0.00032271350100134975,
"loss": 0.0451,
"step": 512
},
{
"epoch": 9.38,
"learning_rate": 0.00031967158987837195,
"loss": 0.0403,
"step": 516
},
{
"epoch": 9.45,
"learning_rate": 0.0003166184534225087,
"loss": 0.0517,
"step": 520
},
{
"epoch": 9.53,
"learning_rate": 0.0003135545835483718,
"loss": 0.0498,
"step": 524
},
{
"epoch": 9.6,
"learning_rate": 0.0003104804738999169,
"loss": 0.0516,
"step": 528
},
{
"epoch": 9.67,
"learning_rate": 0.00030739661977091025,
"loss": 0.0487,
"step": 532
},
{
"epoch": 9.75,
"learning_rate": 0.00030430351802512693,
"loss": 0.045,
"step": 536
},
{
"epoch": 9.82,
"learning_rate": 0.0003012016670162977,
"loss": 0.0397,
"step": 540
},
{
"epoch": 9.89,
"learning_rate": 0.00029809156650781527,
"loss": 0.0484,
"step": 544
},
{
"epoch": 9.96,
"learning_rate": 0.0002949737175922135,
"loss": 0.0404,
"step": 548
},
{
"epoch": 10.0,
"gpt4_scores": 0.6749999999999999,
"step": 550
},
{
"epoch": 10.0,
"std": 0.11602801385872293,
"step": 550
},
{
"epoch": 10.0,
"eval_loss": 3.3666398525238037,
"eval_runtime": 4.9475,
"eval_samples_per_second": 4.649,
"eval_steps_per_second": 1.213,
"step": 550
},
{
"epoch": 10.04,
"learning_rate": 0.0002918486226104327,
"loss": 0.0403,
"step": 552
},
{
"epoch": 10.11,
"learning_rate": 0.0002887167850708831,
"loss": 0.0401,
"step": 556
},
{
"epoch": 10.18,
"learning_rate": 0.00028557870956832135,
"loss": 0.0442,
"step": 560
},
{
"epoch": 10.25,
"learning_rate": 0.00028243490170255044,
"loss": 0.041,
"step": 564
},
{
"epoch": 10.33,
"learning_rate": 0.0002792858679969596,
"loss": 0.036,
"step": 568
},
{
"epoch": 10.4,
"learning_rate": 0.0002761321158169134,
"loss": 0.0533,
"step": 572
},
{
"epoch": 10.47,
"learning_rate": 0.0002729741532880069,
"loss": 0.0506,
"step": 576
},
{
"epoch": 10.55,
"learning_rate": 0.0002698124892141971,
"loss": 0.0532,
"step": 580
},
{
"epoch": 10.62,
"learning_rate": 0.000266647632995826,
"loss": 0.0468,
"step": 584
},
{
"epoch": 10.69,
"learning_rate": 0.0002634800945475465,
"loss": 0.0396,
"step": 588
},
{
"epoch": 10.76,
"learning_rate": 0.00026031038421616684,
"loss": 0.0383,
"step": 592
},
{
"epoch": 10.84,
"learning_rate": 0.00025713901269842405,
"loss": 0.0475,
"step": 596
},
{
"epoch": 10.91,
"learning_rate": 0.000253966490958702,
"loss": 0.0491,
"step": 600
},
{
"epoch": 10.98,
"learning_rate": 0.00025079333014670557,
"loss": 0.038,
"step": 604
},
{
"epoch": 11.0,
"gpt4_scores": 0.63,
"step": 605
},
{
"epoch": 11.0,
"std": 0.11580155439371269,
"step": 605
},
{
"epoch": 11.0,
"eval_loss": 3.3349289894104004,
"eval_runtime": 4.9384,
"eval_samples_per_second": 4.657,
"eval_steps_per_second": 1.215,
"step": 605
},
{
"epoch": 11.05,
"learning_rate": 0.00024762004151510585,
"loss": 0.0482,
"step": 608
},
{
"epoch": 11.13,
"learning_rate": 0.00024444713633716764,
"loss": 0.0356,
"step": 612
},
{
"epoch": 11.2,
"learning_rate": 0.00024127512582437484,
"loss": 0.0444,
"step": 616
},
{
"epoch": 11.27,
"learning_rate": 0.00023810452104406444,
"loss": 0.0411,
"step": 620
},
{
"epoch": 11.35,
"learning_rate": 0.00023493583283708543,
"loss": 0.0441,
"step": 624
},
{
"epoch": 11.42,
"learning_rate": 0.00023176957173549233,
"loss": 0.0393,
"step": 628
},
{
"epoch": 11.49,
"learning_rate": 0.00022860624788029015,
"loss": 0.0363,
"step": 632
},
{
"epoch": 11.56,
"learning_rate": 0.00022544637093924072,
"loss": 0.0393,
"step": 636
},
{
"epoch": 11.64,
"learning_rate": 0.00022229045002474727,
"loss": 0.0452,
"step": 640
},
{
"epoch": 11.71,
"learning_rate": 0.00021913899361182632,
"loss": 0.0417,
"step": 644
},
{
"epoch": 11.78,
"learning_rate": 0.000215992509456184,
"loss": 0.0366,
"step": 648
},
{
"epoch": 11.85,
"learning_rate": 0.00021285150451240712,
"loss": 0.0443,
"step": 652
},
{
"epoch": 11.93,
"learning_rate": 0.000209716484852284,
"loss": 0.0447,
"step": 656
},
{
"epoch": 12.0,
"learning_rate": 0.00020658795558326743,
"loss": 0.0374,
"step": 660
},
{
"epoch": 12.0,
"gpt4_scores": 0.6100000000000001,
"step": 660
},
{
"epoch": 12.0,
"std": 0.12841339493993606,
"step": 660
},
{
"epoch": 12.0,
"eval_loss": 3.4455862045288086,
"eval_runtime": 4.9572,
"eval_samples_per_second": 4.64,
"eval_steps_per_second": 1.21,
"step": 660
},
{
"epoch": 12.07,
"learning_rate": 0.0002034664207670925,
"loss": 0.0442,
"step": 664
},
{
"epoch": 12.15,
"learning_rate": 0.00020035238333856371,
"loss": 0.0391,
"step": 668
},
{
"epoch": 12.22,
"learning_rate": 0.0001972463450245226,
"loss": 0.0392,
"step": 672
},
{
"epoch": 12.29,
"learning_rate": 0.00019414880626301146,
"loss": 0.0426,
"step": 676
},
{
"epoch": 12.36,
"learning_rate": 0.00019106026612264316,
"loss": 0.043,
"step": 680
},
{
"epoch": 12.44,
"learning_rate": 0.0001879812222221929,
"loss": 0.0395,
"step": 684
},
{
"epoch": 12.51,
"learning_rate": 0.00018491217065042198,
"loss": 0.0396,
"step": 688
},
{
"epoch": 12.58,
"learning_rate": 0.00018185360588615057,
"loss": 0.0394,
"step": 692
},
{
"epoch": 12.65,
"learning_rate": 0.00017880602071858692,
"loss": 0.0403,
"step": 696
},
{
"epoch": 12.73,
"learning_rate": 0.00017576990616793137,
"loss": 0.0349,
"step": 700
},
{
"epoch": 12.8,
"learning_rate": 0.00017274575140626317,
"loss": 0.0404,
"step": 704
},
{
"epoch": 12.87,
"learning_rate": 0.0001697340436787273,
"loss": 0.0344,
"step": 708
},
{
"epoch": 12.95,
"learning_rate": 0.00016673526822502983,
"loss": 0.0384,
"step": 712
},
{
"epoch": 13.0,
"gpt4_scores": 0.53,
"step": 715
},
{
"epoch": 13.0,
"std": 0.13787675656179324,
"step": 715
},
{
"epoch": 13.0,
"eval_loss": 3.4822309017181396,
"eval_runtime": 4.946,
"eval_samples_per_second": 4.65,
"eval_steps_per_second": 1.213,
"step": 715
},
{
"epoch": 13.02,
"learning_rate": 0.0001637499082012574,
"loss": 0.0522,
"step": 716
},
{
"epoch": 13.09,
"learning_rate": 0.00016077844460203207,
"loss": 0.0384,
"step": 720
},
{
"epoch": 13.16,
"learning_rate": 0.00015782135618301485,
"loss": 0.0336,
"step": 724
},
{
"epoch": 13.24,
"learning_rate": 0.00015487911938376925,
"loss": 0.0401,
"step": 728
},
{
"epoch": 13.31,
"learning_rate": 0.00015195220825099862,
"loss": 0.0395,
"step": 732
},
{
"epoch": 13.38,
"learning_rate": 0.00014904109436216883,
"loss": 0.0414,
"step": 736
},
{
"epoch": 13.45,
"learning_rate": 0.0001461462467495284,
"loss": 0.0393,
"step": 740
},
{
"epoch": 13.53,
"learning_rate": 0.00014326813182453956,
"loss": 0.0383,
"step": 744
},
{
"epoch": 13.6,
"learning_rate": 0.00014040721330273062,
"loss": 0.038,
"step": 748
},
{
"epoch": 13.67,
"learning_rate": 0.0001375639521289836,
"loss": 0.0383,
"step": 752
},
{
"epoch": 13.75,
"learning_rate": 0.00013473880640326724,
"loss": 0.0405,
"step": 756
},
{
"epoch": 13.82,
"learning_rate": 0.00013193223130682935,
"loss": 0.0422,
"step": 760
},
{
"epoch": 13.89,
"learning_rate": 0.000129144679028859,
"loss": 0.0408,
"step": 764
},
{
"epoch": 13.96,
"learning_rate": 0.00012637659869363084,
"loss": 0.0408,
"step": 768
},
{
"epoch": 14.0,
"gpt4_scores": 0.6500000000000001,
"step": 770
},
{
"epoch": 14.0,
"std": 0.11423659658795862,
"step": 770
},
{
"epoch": 14.0,
"eval_loss": 3.471827983856201,
"eval_runtime": 4.9416,
"eval_samples_per_second": 4.654,
"eval_steps_per_second": 1.214,
"step": 770
},
{
"epoch": 14.04,
"learning_rate": 0.00012362843628814266,
"loss": 0.0343,
"step": 772
},
{
"epoch": 14.11,
"learning_rate": 0.00012090063459025954,
"loss": 0.0386,
"step": 776
},
{
"epoch": 14.18,
"learning_rate": 0.00011819363309737438,
"loss": 0.0412,
"step": 780
},
{
"epoch": 14.25,
"learning_rate": 0.0001155078679555969,
"loss": 0.0402,
"step": 784
},
{
"epoch": 14.33,
"learning_rate": 0.00011284377188948258,
"loss": 0.038,
"step": 788
},
{
"epoch": 14.4,
"learning_rate": 0.00011020177413231333,
"loss": 0.0367,
"step": 792
},
{
"epoch": 14.47,
"learning_rate": 0.0001075823003569403,
"loss": 0.0449,
"step": 796
},
{
"epoch": 14.55,
"learning_rate": 0.00010498577260720049,
"loss": 0.036,
"step": 800
},
{
"epoch": 14.62,
"learning_rate": 0.00010241260922991761,
"loss": 0.0385,
"step": 804
},
{
"epoch": 14.69,
"learning_rate": 9.986322480749927e-05,
"loss": 0.0428,
"step": 808
},
{
"epoch": 14.76,
"learning_rate": 9.733803009114044e-05,
"loss": 0.0375,
"step": 812
},
{
"epoch": 14.84,
"learning_rate": 9.483743193464408e-05,
"loss": 0.0435,
"step": 816
},
{
"epoch": 14.91,
"learning_rate": 9.236183322886945e-05,
"loss": 0.0394,
"step": 820
},
{
"epoch": 14.98,
"learning_rate": 8.991163283681945e-05,
"loss": 0.0347,
"step": 824
},
{
"epoch": 15.0,
"gpt4_scores": 0.67,
"step": 825
},
{
"epoch": 15.0,
"std": 0.11644741302407709,
"step": 825
},
{
"epoch": 15.0,
"eval_loss": 3.502847671508789,
"eval_runtime": 4.9425,
"eval_samples_per_second": 4.654,
"eval_steps_per_second": 1.214,
"step": 825
},
{
"epoch": 15.05,
"learning_rate": 8.748722552937688e-05,
"loss": 0.0395,
"step": 828
},
{
"epoch": 15.13,
"learning_rate": 8.508900192169963e-05,
"loss": 0.0389,
"step": 832
},
{
"epoch": 15.2,
"learning_rate": 8.271734841028553e-05,
"loss": 0.0425,
"step": 836
},
{
"epoch": 15.27,
"learning_rate": 8.037264711071699e-05,
"loss": 0.0403,
"step": 840
},
{
"epoch": 15.35,
"learning_rate": 7.805527579609576e-05,
"loss": 0.0405,
"step": 844
},
{
"epoch": 15.42,
"learning_rate": 7.576560783617667e-05,
"loss": 0.0392,
"step": 848
},
{
"epoch": 15.49,
"learning_rate": 7.35040121372109e-05,
"loss": 0.0383,
"step": 852
},
{
"epoch": 15.56,
"learning_rate": 7.127085308250913e-05,
"loss": 0.0404,
"step": 856
},
{
"epoch": 15.64,
"learning_rate": 6.906649047373245e-05,
"loss": 0.0347,
"step": 860
},
{
"epoch": 15.71,
"learning_rate": 6.689127947292231e-05,
"loss": 0.0363,
"step": 864
},
{
"epoch": 15.78,
"learning_rate": 6.474557054527707e-05,
"loss": 0.0341,
"step": 868
},
{
"epoch": 15.85,
"learning_rate": 6.262970940268654e-05,
"loss": 0.0377,
"step": 872
},
{
"epoch": 15.93,
"learning_rate": 6.054403694803079e-05,
"loss": 0.0442,
"step": 876
},
{
"epoch": 16.0,
"learning_rate": 5.848888922025553e-05,
"loss": 0.0377,
"step": 880
},
{
"epoch": 16.0,
"gpt4_scores": 0.5700000000000001,
"step": 880
},
{
"epoch": 16.0,
"std": 0.1319469590403659,
"step": 880
},
{
"epoch": 16.0,
"eval_loss": 3.5217506885528564,
"eval_runtime": 4.9355,
"eval_samples_per_second": 4.66,
"eval_steps_per_second": 1.216,
"step": 880
},
{
"epoch": 16.07,
"learning_rate": 5.646459734022938e-05,
"loss": 0.041,
"step": 884
},
{
"epoch": 16.15,
"learning_rate": 5.4471487457395216e-05,
"loss": 0.0333,
"step": 888
},
{
"epoch": 16.22,
"learning_rate": 5.2509880697220956e-05,
"loss": 0.0407,
"step": 892
},
{
"epoch": 16.29,
"learning_rate": 5.058009310946118e-05,
"loss": 0.0391,
"step": 896
},
{
"epoch": 16.36,
"learning_rate": 4.8682435617235344e-05,
"loss": 0.0365,
"step": 900
},
{
"epoch": 16.44,
"learning_rate": 4.6817213966933034e-05,
"loss": 0.0425,
"step": 904
},
{
"epoch": 16.51,
"learning_rate": 4.498472867895223e-05,
"loss": 0.0363,
"step": 908
},
{
"epoch": 16.58,
"learning_rate": 4.318527499928074e-05,
"loss": 0.0391,
"step": 912
},
{
"epoch": 16.65,
"learning_rate": 4.141914285192619e-05,
"loss": 0.0375,
"step": 916
},
{
"epoch": 16.73,
"learning_rate": 3.968661679220467e-05,
"loss": 0.0388,
"step": 920
},
{
"epoch": 16.8,
"learning_rate": 3.798797596089351e-05,
"loss": 0.0417,
"step": 924
},
{
"epoch": 16.87,
"learning_rate": 3.632349403925664e-05,
"loss": 0.0378,
"step": 928
},
{
"epoch": 16.95,
"learning_rate": 3.4693439204949856e-05,
"loss": 0.0395,
"step": 932
},
{
"epoch": 17.0,
"gpt4_scores": 0.595,
"step": 935
},
{
"epoch": 17.0,
"std": 0.1132364782214636,
"step": 935
},
{
"epoch": 17.0,
"eval_loss": 3.5319790840148926,
"eval_runtime": 4.9408,
"eval_samples_per_second": 4.655,
"eval_steps_per_second": 1.214,
"step": 935
},
{
"epoch": 17.02,
"learning_rate": 3.309807408881269e-05,
"loss": 0.0416,
"step": 936
},
{
"epoch": 17.09,
"learning_rate": 3.1537655732553766e-05,
"loss": 0.0409,
"step": 940
},
{
"epoch": 17.16,
"learning_rate": 3.0012435547336736e-05,
"loss": 0.0412,
"step": 944
},
{
"epoch": 17.24,
"learning_rate": 2.8522659273273603e-05,
"loss": 0.0411,
"step": 948
},
{
"epoch": 17.31,
"learning_rate": 2.7068566939831645e-05,
"loss": 0.0332,
"step": 952
},
{
"epoch": 17.38,
"learning_rate": 2.5650392827160445e-05,
"loss": 0.0315,
"step": 956
},
{
"epoch": 17.45,
"learning_rate": 2.4268365428344735e-05,
"loss": 0.0448,
"step": 960
},
{
"epoch": 17.53,
"learning_rate": 2.29227074125907e-05,
"loss": 0.0349,
"step": 964
},
{
"epoch": 17.6,
"learning_rate": 2.1613635589349755e-05,
"loss": 0.0367,
"step": 968
},
{
"epoch": 17.67,
"learning_rate": 2.0341360873386672e-05,
"loss": 0.0386,
"step": 972
},
{
"epoch": 17.75,
"learning_rate": 1.9106088250797264e-05,
"loss": 0.0371,
"step": 976
},
{
"epoch": 17.82,
"learning_rate": 1.7908016745981858e-05,
"loss": 0.0369,
"step": 980
},
{
"epoch": 17.89,
"learning_rate": 1.674733938957873e-05,
"loss": 0.0464,
"step": 984
},
{
"epoch": 17.96,
"learning_rate": 1.562424318736344e-05,
"loss": 0.0408,
"step": 988
},
{
"epoch": 18.0,
"gpt4_scores": 0.59,
"step": 990
},
{
"epoch": 18.0,
"std": 0.13224976370489286,
"step": 990
},
{
"epoch": 18.0,
"eval_loss": 3.537071466445923,
"eval_runtime": 4.9404,
"eval_samples_per_second": 4.656,
"eval_steps_per_second": 1.214,
"step": 990
},
{
"epoch": 18.04,
"learning_rate": 1.4538909090118846e-05,
"loss": 0.0424,
"step": 992
},
{
"epoch": 18.11,
"learning_rate": 1.3491511964480702e-05,
"loss": 0.0406,
"step": 996
},
{
"epoch": 18.18,
"learning_rate": 1.2482220564763668e-05,
"loss": 0.0404,
"step": 1000
},
{
"epoch": 18.25,
"learning_rate": 1.1511197505771842e-05,
"loss": 0.039,
"step": 1004
},
{
"epoch": 18.33,
"learning_rate": 1.0578599236598707e-05,
"loss": 0.0398,
"step": 1008
},
{
"epoch": 18.4,
"learning_rate": 9.684576015420277e-06,
"loss": 0.0347,
"step": 1012
},
{
"epoch": 18.47,
"learning_rate": 8.829271885286095e-06,
"loss": 0.0386,
"step": 1016
},
{
"epoch": 18.55,
"learning_rate": 8.012824650910938e-06,
"loss": 0.034,
"step": 1020
},
{
"epoch": 18.62,
"learning_rate": 7.235365856472442e-06,
"loss": 0.0349,
"step": 1024
},
{
"epoch": 18.69,
"learning_rate": 6.497020764416634e-06,
"loss": 0.0376,
"step": 1028
},
{
"epoch": 18.76,
"learning_rate": 5.797908335276214e-06,
"loss": 0.0422,
"step": 1032
},
{
"epoch": 18.84,
"learning_rate": 5.1381412085036995e-06,
"loss": 0.0404,
"step": 1036
},
{
"epoch": 18.91,
"learning_rate": 4.517825684323323e-06,
"loss": 0.0356,
"step": 1040
},
{
"epoch": 18.98,
"learning_rate": 3.937061706604072e-06,
"loss": 0.0468,
"step": 1044
},
{
"epoch": 19.0,
"gpt4_scores": 0.575,
"step": 1045
},
{
"epoch": 19.0,
"std": 0.12791598805466028,
"step": 1045
},
{
"epoch": 19.0,
"eval_loss": 3.5391297340393066,
"eval_runtime": 4.9485,
"eval_samples_per_second": 4.648,
"eval_steps_per_second": 1.212,
"step": 1045
},
{
"epoch": 19.05,
"learning_rate": 3.3959428467570664e-06,
"loss": 0.0362,
"step": 1048
},
{
"epoch": 19.13,
"learning_rate": 2.8945562886593944e-06,
"loss": 0.0408,
"step": 1052
},
{
"epoch": 19.2,
"learning_rate": 2.4329828146074094e-06,
"loss": 0.0391,
"step": 1056
},
{
"epoch": 19.27,
"learning_rate": 2.011296792301165e-06,
"loss": 0.0376,
"step": 1060
},
{
"epoch": 19.35,
"learning_rate": 1.6295661628624448e-06,
"loss": 0.0446,
"step": 1064
},
{
"epoch": 19.42,
"learning_rate": 1.2878524298882698e-06,
"loss": 0.0332,
"step": 1068
},
{
"epoch": 19.49,
"learning_rate": 9.862106495415469e-07,
"loss": 0.0375,
"step": 1072
},
{
"epoch": 19.56,
"learning_rate": 7.246894216806354e-07,
"loss": 0.0451,
"step": 1076
},
{
"epoch": 19.64,
"learning_rate": 5.033308820289185e-07,
"loss": 0.0451,
"step": 1080
},
{
"epoch": 19.71,
"learning_rate": 3.221706953860093e-07,
"loss": 0.0354,
"step": 1084
},
{
"epoch": 19.78,
"learning_rate": 1.8123804988159908e-07,
"loss": 0.0397,
"step": 1088
},
{
"epoch": 19.85,
"learning_rate": 8.0555652272718e-08,
"loss": 0.0394,
"step": 1092
},
{
"epoch": 19.93,
"learning_rate": 2.0139724285161975e-08,
"loss": 0.0321,
"step": 1096
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 0.0358,
"step": 1100
},
{
"epoch": 20.0,
"step": 1100,
"total_flos": 3.792163606268314e+16,
"train_loss": 0.0,
"train_runtime": 11.9759,
"train_samples_per_second": 181.197,
"train_steps_per_second": 45.925
}
],
"logging_steps": 4,
"max_steps": 550,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 55,
"total_flos": 3.792163606268314e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
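
A minimal sketch, assuming the JSON above is saved locally as trainer_state.json and Python 3 with matplotlib is available (file name and paths are hypothetical), of how the log_history entries could be separated into per-step training losses and per-epoch eval_loss records:

    import json
    import matplotlib.pyplot as plt

    # Load the trainer state dumped by the training run above.
    with open("trainer_state.json") as f:
        state = json.load(f)

    history = state["log_history"]

    # Entries with a "loss" key are the per-step training logs
    # (written every `logging_steps` optimizer steps).
    train_steps = [e["step"] for e in history if "loss" in e]
    train_loss = [e["loss"] for e in history if "loss" in e]

    # Entries with an "eval_loss" key are the once-per-epoch evaluations.
    eval_epochs = [e["epoch"] for e in history if "eval_loss" in e]
    eval_loss = [e["eval_loss"] for e in history if "eval_loss" in e]

    plt.plot(train_steps, train_loss, label="train loss")
    plt.xlabel("global step")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

    # Print the per-epoch eval losses for a quick look at overfitting.
    print(list(zip(eval_epochs, eval_loss)))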