{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6596306068601583,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.989006156552331e-05,
"loss": 1.8212,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.978012313104662e-05,
"loss": 1.7676,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 4.967018469656992e-05,
"loss": 1.7575,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 4.956024626209323e-05,
"loss": 1.6747,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 4.9450307827616535e-05,
"loss": 1.5765,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 4.934036939313985e-05,
"loss": 1.6145,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 4.9230430958663146e-05,
"loss": 1.5378,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 4.912049252418646e-05,
"loss": 1.6184,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 4.901055408970976e-05,
"loss": 1.5486,
"step": 90
},
{
"epoch": 0.07,
"learning_rate": 4.8900615655233075e-05,
"loss": 1.5721,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 4.879067722075638e-05,
"loss": 1.5253,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 4.8680738786279686e-05,
"loss": 1.521,
"step": 120
},
{
"epoch": 0.09,
"learning_rate": 4.857080035180299e-05,
"loss": 1.5171,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 4.84608619173263e-05,
"loss": 1.5259,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 4.835092348284961e-05,
"loss": 1.5831,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 4.824098504837291e-05,
"loss": 1.5398,
"step": 160
},
{
"epoch": 0.11,
"learning_rate": 4.813104661389622e-05,
"loss": 1.5684,
"step": 170
},
{
"epoch": 0.12,
"learning_rate": 4.802110817941953e-05,
"loss": 1.5382,
"step": 180
},
{
"epoch": 0.13,
"learning_rate": 4.7911169744942836e-05,
"loss": 1.5588,
"step": 190
},
{
"epoch": 0.13,
"learning_rate": 4.780123131046614e-05,
"loss": 1.5192,
"step": 200
},
{
"epoch": 0.14,
"learning_rate": 4.7691292875989446e-05,
"loss": 1.6004,
"step": 210
},
{
"epoch": 0.15,
"learning_rate": 4.758135444151275e-05,
"loss": 1.5603,
"step": 220
},
{
"epoch": 0.15,
"learning_rate": 4.7471416007036063e-05,
"loss": 1.5532,
"step": 230
},
{
"epoch": 0.16,
"learning_rate": 4.736147757255937e-05,
"loss": 1.5632,
"step": 240
},
{
"epoch": 0.16,
"learning_rate": 4.7251539138082674e-05,
"loss": 1.5598,
"step": 250
},
{
"epoch": 0.17,
"learning_rate": 4.714160070360598e-05,
"loss": 1.54,
"step": 260
},
{
"epoch": 0.18,
"learning_rate": 4.703166226912929e-05,
"loss": 1.4843,
"step": 270
},
{
"epoch": 0.18,
"learning_rate": 4.6921723834652596e-05,
"loss": 1.5328,
"step": 280
},
{
"epoch": 0.19,
"learning_rate": 4.681178540017591e-05,
"loss": 1.5677,
"step": 290
},
{
"epoch": 0.2,
"learning_rate": 4.670184696569921e-05,
"loss": 1.5629,
"step": 300
},
{
"epoch": 0.2,
"learning_rate": 4.659190853122252e-05,
"loss": 1.5426,
"step": 310
},
{
"epoch": 0.21,
"learning_rate": 4.6481970096745824e-05,
"loss": 1.4997,
"step": 320
},
{
"epoch": 0.22,
"learning_rate": 4.6372031662269136e-05,
"loss": 1.5409,
"step": 330
},
{
"epoch": 0.22,
"learning_rate": 4.6262093227792435e-05,
"loss": 1.5269,
"step": 340
},
{
"epoch": 0.23,
"learning_rate": 4.615215479331575e-05,
"loss": 1.5079,
"step": 350
},
{
"epoch": 0.24,
"learning_rate": 4.604221635883905e-05,
"loss": 1.5031,
"step": 360
},
{
"epoch": 0.24,
"learning_rate": 4.593227792436236e-05,
"loss": 1.5412,
"step": 370
},
{
"epoch": 0.25,
"learning_rate": 4.582233948988567e-05,
"loss": 1.5096,
"step": 380
},
{
"epoch": 0.26,
"learning_rate": 4.5712401055408974e-05,
"loss": 1.5605,
"step": 390
},
{
"epoch": 0.26,
"learning_rate": 4.560246262093228e-05,
"loss": 1.5628,
"step": 400
},
{
"epoch": 0.27,
"learning_rate": 4.5492524186455585e-05,
"loss": 1.5396,
"step": 410
},
{
"epoch": 0.28,
"learning_rate": 4.53825857519789e-05,
"loss": 1.4757,
"step": 420
},
{
"epoch": 0.28,
"learning_rate": 4.52726473175022e-05,
"loss": 1.5366,
"step": 430
},
{
"epoch": 0.29,
"learning_rate": 4.516270888302551e-05,
"loss": 1.5037,
"step": 440
},
{
"epoch": 0.3,
"learning_rate": 4.505277044854881e-05,
"loss": 1.5336,
"step": 450
},
{
"epoch": 0.3,
"learning_rate": 4.4942832014072125e-05,
"loss": 1.5067,
"step": 460
},
{
"epoch": 0.31,
"learning_rate": 4.483289357959543e-05,
"loss": 1.4911,
"step": 470
},
{
"epoch": 0.32,
"learning_rate": 4.4722955145118735e-05,
"loss": 1.4891,
"step": 480
},
{
"epoch": 0.32,
"learning_rate": 4.461301671064204e-05,
"loss": 1.5337,
"step": 490
},
{
"epoch": 0.33,
"learning_rate": 4.450307827616535e-05,
"loss": 1.5254,
"step": 500
},
{
"epoch": 0.34,
"learning_rate": 4.439313984168866e-05,
"loss": 1.537,
"step": 510
},
{
"epoch": 0.34,
"learning_rate": 4.428320140721196e-05,
"loss": 1.4969,
"step": 520
},
{
"epoch": 0.35,
"learning_rate": 4.417326297273527e-05,
"loss": 1.4863,
"step": 530
},
{
"epoch": 0.36,
"learning_rate": 4.406332453825858e-05,
"loss": 1.5168,
"step": 540
},
{
"epoch": 0.36,
"learning_rate": 4.3953386103781885e-05,
"loss": 1.5242,
"step": 550
},
{
"epoch": 0.37,
"learning_rate": 4.384344766930519e-05,
"loss": 1.5307,
"step": 560
},
{
"epoch": 0.38,
"learning_rate": 4.3733509234828496e-05,
"loss": 1.5087,
"step": 570
},
{
"epoch": 0.38,
"learning_rate": 4.36235708003518e-05,
"loss": 1.5043,
"step": 580
},
{
"epoch": 0.39,
"learning_rate": 4.351363236587511e-05,
"loss": 1.4933,
"step": 590
},
{
"epoch": 0.4,
"learning_rate": 4.340369393139842e-05,
"loss": 1.5111,
"step": 600
},
{
"epoch": 0.4,
"learning_rate": 4.329375549692173e-05,
"loss": 1.4897,
"step": 610
},
{
"epoch": 0.41,
"learning_rate": 4.318381706244503e-05,
"loss": 1.502,
"step": 620
},
{
"epoch": 0.42,
"learning_rate": 4.307387862796834e-05,
"loss": 1.5016,
"step": 630
},
{
"epoch": 0.42,
"learning_rate": 4.2963940193491646e-05,
"loss": 1.5328,
"step": 640
},
{
"epoch": 0.43,
"learning_rate": 4.285400175901496e-05,
"loss": 1.5442,
"step": 650
},
{
"epoch": 0.44,
"learning_rate": 4.2744063324538256e-05,
"loss": 1.527,
"step": 660
},
{
"epoch": 0.44,
"learning_rate": 4.263412489006157e-05,
"loss": 1.5327,
"step": 670
},
{
"epoch": 0.45,
"learning_rate": 4.2524186455584874e-05,
"loss": 1.5571,
"step": 680
},
{
"epoch": 0.46,
"learning_rate": 4.2414248021108186e-05,
"loss": 1.5297,
"step": 690
},
{
"epoch": 0.46,
"learning_rate": 4.230430958663149e-05,
"loss": 1.4894,
"step": 700
},
{
"epoch": 0.47,
"learning_rate": 4.2194371152154796e-05,
"loss": 1.5008,
"step": 710
},
{
"epoch": 0.47,
"learning_rate": 4.20844327176781e-05,
"loss": 1.5135,
"step": 720
},
{
"epoch": 0.48,
"learning_rate": 4.1974494283201407e-05,
"loss": 1.5081,
"step": 730
},
{
"epoch": 0.49,
"learning_rate": 4.186455584872472e-05,
"loss": 1.5003,
"step": 740
},
{
"epoch": 0.49,
"learning_rate": 4.1754617414248024e-05,
"loss": 1.5154,
"step": 750
},
{
"epoch": 0.5,
"learning_rate": 4.164467897977133e-05,
"loss": 1.4863,
"step": 760
},
{
"epoch": 0.51,
"learning_rate": 4.1534740545294634e-05,
"loss": 1.5193,
"step": 770
},
{
"epoch": 0.51,
"learning_rate": 4.1424802110817946e-05,
"loss": 1.4906,
"step": 780
},
{
"epoch": 0.52,
"learning_rate": 4.131486367634125e-05,
"loss": 1.5434,
"step": 790
},
{
"epoch": 0.53,
"learning_rate": 4.120492524186456e-05,
"loss": 1.5458,
"step": 800
},
{
"epoch": 0.53,
"learning_rate": 4.109498680738786e-05,
"loss": 1.5612,
"step": 810
},
{
"epoch": 0.54,
"learning_rate": 4.0985048372911174e-05,
"loss": 1.5537,
"step": 820
},
{
"epoch": 0.55,
"learning_rate": 4.087510993843448e-05,
"loss": 1.5206,
"step": 830
},
{
"epoch": 0.55,
"learning_rate": 4.0765171503957784e-05,
"loss": 1.492,
"step": 840
},
{
"epoch": 0.56,
"learning_rate": 4.065523306948109e-05,
"loss": 1.4721,
"step": 850
},
{
"epoch": 0.57,
"learning_rate": 4.05452946350044e-05,
"loss": 1.5354,
"step": 860
},
{
"epoch": 0.57,
"learning_rate": 4.043535620052771e-05,
"loss": 1.5165,
"step": 870
},
{
"epoch": 0.58,
"learning_rate": 4.032541776605102e-05,
"loss": 1.5407,
"step": 880
},
{
"epoch": 0.59,
"learning_rate": 4.021547933157432e-05,
"loss": 1.5151,
"step": 890
},
{
"epoch": 0.59,
"learning_rate": 4.010554089709763e-05,
"loss": 1.5483,
"step": 900
},
{
"epoch": 0.6,
"learning_rate": 3.9995602462620935e-05,
"loss": 1.4871,
"step": 910
},
{
"epoch": 0.61,
"learning_rate": 3.988566402814424e-05,
"loss": 1.5558,
"step": 920
},
{
"epoch": 0.61,
"learning_rate": 3.9775725593667545e-05,
"loss": 1.5084,
"step": 930
},
{
"epoch": 0.62,
"learning_rate": 3.966578715919085e-05,
"loss": 1.509,
"step": 940
},
{
"epoch": 0.63,
"learning_rate": 3.955584872471416e-05,
"loss": 1.5116,
"step": 950
},
{
"epoch": 0.63,
"learning_rate": 3.944591029023747e-05,
"loss": 1.482,
"step": 960
},
{
"epoch": 0.64,
"learning_rate": 3.933597185576078e-05,
"loss": 1.4935,
"step": 970
},
{
"epoch": 0.65,
"learning_rate": 3.922603342128408e-05,
"loss": 1.5034,
"step": 980
},
{
"epoch": 0.65,
"learning_rate": 3.911609498680739e-05,
"loss": 1.5448,
"step": 990
},
{
"epoch": 0.66,
"learning_rate": 3.9006156552330695e-05,
"loss": 1.481,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 4548,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 6.3202482192384e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
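
The JSON above is the Trainer's checkpoint state: every `logging_steps` (10) optimizer steps it records the epoch fraction, current learning rate, and training loss, up to `global_step` 1000 of `max_steps` 4548. As a minimal sketch of how one might inspect it, the Python below loads the file and prints the logged curve next to a linear-decay estimate. The base learning rate of 5e-5 and the file path are assumptions inferred from the logged values (e.g. 5e-5 * (1 - 1000/4548) ≈ 3.90062e-05 at step 1000), not fields stated in the file itself.

import json

# Assumption: the file is saved locally as ./trainer_state.json
with open("trainer_state.json") as f:
    state = json.load(f)

max_steps = state["max_steps"]      # 4548
history = state["log_history"]      # one entry per logging_steps (10) optimizer steps

for entry in history:
    step, loss, lr = entry["step"], entry["loss"], entry["learning_rate"]
    # Assumed linear schedule: base LR 5e-5 decayed to 0 over max_steps
    expected_lr = 5e-5 * (1 - step / max_steps)
    print(f"step {step:5d}  loss {loss:.4f}  lr {lr:.3e}  (linear estimate {expected_lr:.3e})")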