{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.32981530343007914,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.989006156552331e-05,
"loss": 1.8212,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.978012313104662e-05,
"loss": 1.7676,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 4.967018469656992e-05,
"loss": 1.7575,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 4.956024626209323e-05,
"loss": 1.6747,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 4.9450307827616535e-05,
"loss": 1.5765,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 4.934036939313985e-05,
"loss": 1.6145,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 4.9230430958663146e-05,
"loss": 1.5378,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 4.912049252418646e-05,
"loss": 1.6184,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 4.901055408970976e-05,
"loss": 1.5486,
"step": 90
},
{
"epoch": 0.07,
"learning_rate": 4.8900615655233075e-05,
"loss": 1.5721,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 4.879067722075638e-05,
"loss": 1.5253,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 4.8680738786279686e-05,
"loss": 1.521,
"step": 120
},
{
"epoch": 0.09,
"learning_rate": 4.857080035180299e-05,
"loss": 1.5171,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 4.84608619173263e-05,
"loss": 1.5259,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 4.835092348284961e-05,
"loss": 1.5831,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 4.824098504837291e-05,
"loss": 1.5398,
"step": 160
},
{
"epoch": 0.11,
"learning_rate": 4.813104661389622e-05,
"loss": 1.5684,
"step": 170
},
{
"epoch": 0.12,
"learning_rate": 4.802110817941953e-05,
"loss": 1.5382,
"step": 180
},
{
"epoch": 0.13,
"learning_rate": 4.7911169744942836e-05,
"loss": 1.5588,
"step": 190
},
{
"epoch": 0.13,
"learning_rate": 4.780123131046614e-05,
"loss": 1.5192,
"step": 200
},
{
"epoch": 0.14,
"learning_rate": 4.7691292875989446e-05,
"loss": 1.6004,
"step": 210
},
{
"epoch": 0.15,
"learning_rate": 4.758135444151275e-05,
"loss": 1.5603,
"step": 220
},
{
"epoch": 0.15,
"learning_rate": 4.7471416007036063e-05,
"loss": 1.5532,
"step": 230
},
{
"epoch": 0.16,
"learning_rate": 4.736147757255937e-05,
"loss": 1.5632,
"step": 240
},
{
"epoch": 0.16,
"learning_rate": 4.7251539138082674e-05,
"loss": 1.5598,
"step": 250
},
{
"epoch": 0.17,
"learning_rate": 4.714160070360598e-05,
"loss": 1.54,
"step": 260
},
{
"epoch": 0.18,
"learning_rate": 4.703166226912929e-05,
"loss": 1.4843,
"step": 270
},
{
"epoch": 0.18,
"learning_rate": 4.6921723834652596e-05,
"loss": 1.5328,
"step": 280
},
{
"epoch": 0.19,
"learning_rate": 4.681178540017591e-05,
"loss": 1.5677,
"step": 290
},
{
"epoch": 0.2,
"learning_rate": 4.670184696569921e-05,
"loss": 1.5629,
"step": 300
},
{
"epoch": 0.2,
"learning_rate": 4.659190853122252e-05,
"loss": 1.5426,
"step": 310
},
{
"epoch": 0.21,
"learning_rate": 4.6481970096745824e-05,
"loss": 1.4997,
"step": 320
},
{
"epoch": 0.22,
"learning_rate": 4.6372031662269136e-05,
"loss": 1.5409,
"step": 330
},
{
"epoch": 0.22,
"learning_rate": 4.6262093227792435e-05,
"loss": 1.5269,
"step": 340
},
{
"epoch": 0.23,
"learning_rate": 4.615215479331575e-05,
"loss": 1.5079,
"step": 350
},
{
"epoch": 0.24,
"learning_rate": 4.604221635883905e-05,
"loss": 1.5031,
"step": 360
},
{
"epoch": 0.24,
"learning_rate": 4.593227792436236e-05,
"loss": 1.5412,
"step": 370
},
{
"epoch": 0.25,
"learning_rate": 4.582233948988567e-05,
"loss": 1.5096,
"step": 380
},
{
"epoch": 0.26,
"learning_rate": 4.5712401055408974e-05,
"loss": 1.5605,
"step": 390
},
{
"epoch": 0.26,
"learning_rate": 4.560246262093228e-05,
"loss": 1.5628,
"step": 400
},
{
"epoch": 0.27,
"learning_rate": 4.5492524186455585e-05,
"loss": 1.5396,
"step": 410
},
{
"epoch": 0.28,
"learning_rate": 4.53825857519789e-05,
"loss": 1.4757,
"step": 420
},
{
"epoch": 0.28,
"learning_rate": 4.52726473175022e-05,
"loss": 1.5366,
"step": 430
},
{
"epoch": 0.29,
"learning_rate": 4.516270888302551e-05,
"loss": 1.5037,
"step": 440
},
{
"epoch": 0.3,
"learning_rate": 4.505277044854881e-05,
"loss": 1.5336,
"step": 450
},
{
"epoch": 0.3,
"learning_rate": 4.4942832014072125e-05,
"loss": 1.5067,
"step": 460
},
{
"epoch": 0.31,
"learning_rate": 4.483289357959543e-05,
"loss": 1.4911,
"step": 470
},
{
"epoch": 0.32,
"learning_rate": 4.4722955145118735e-05,
"loss": 1.4891,
"step": 480
},
{
"epoch": 0.32,
"learning_rate": 4.461301671064204e-05,
"loss": 1.5337,
"step": 490
},
{
"epoch": 0.33,
"learning_rate": 4.450307827616535e-05,
"loss": 1.5254,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 4548,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 3.1601241096192e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}