{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 504,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05952380952380952,
"grad_norm": 12.939931126188862,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.8351,
"step": 10
},
{
"epoch": 0.11904761904761904,
"grad_norm": 1.7991171348010244,
"learning_rate": 3.846153846153847e-06,
"loss": 0.694,
"step": 20
},
{
"epoch": 0.17857142857142858,
"grad_norm": 4.232210539308385,
"learning_rate": 4.999222516029188e-06,
"loss": 0.6323,
"step": 30
},
{
"epoch": 0.23809523809523808,
"grad_norm": 3.3828957748906325,
"learning_rate": 4.990481990931694e-06,
"loss": 0.5953,
"step": 40
},
{
"epoch": 0.2976190476190476,
"grad_norm": 2.4316476569375514,
"learning_rate": 4.972066953731755e-06,
"loss": 0.5715,
"step": 50
},
{
"epoch": 0.35714285714285715,
"grad_norm": 1.7570071553637299,
"learning_rate": 4.944056921471231e-06,
"loss": 0.5503,
"step": 60
},
{
"epoch": 0.4166666666666667,
"grad_norm": 2.079510879509323,
"learning_rate": 4.906572842855441e-06,
"loss": 0.5389,
"step": 70
},
{
"epoch": 0.47619047619047616,
"grad_norm": 2.1496119249362886,
"learning_rate": 4.859776575990668e-06,
"loss": 0.5303,
"step": 80
},
{
"epoch": 0.5357142857142857,
"grad_norm": 1.5629017292456824,
"learning_rate": 4.803870189472853e-06,
"loss": 0.523,
"step": 90
},
{
"epoch": 0.5952380952380952,
"grad_norm": 1.69423474877199,
"learning_rate": 4.73909508984539e-06,
"loss": 0.5128,
"step": 100
},
{
"epoch": 0.6547619047619048,
"grad_norm": 2.3165081258842823,
"learning_rate": 4.665730979193731e-06,
"loss": 0.5103,
"step": 110
},
{
"epoch": 0.7142857142857143,
"grad_norm": 2.5491510204946994,
"learning_rate": 4.5840946473779415e-06,
"loss": 0.504,
"step": 120
},
{
"epoch": 0.7738095238095238,
"grad_norm": 2.585441354830173,
"learning_rate": 4.494538604118408e-06,
"loss": 0.5006,
"step": 130
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.3929857956928453,
"learning_rate": 4.397449556841417e-06,
"loss": 0.4943,
"step": 140
},
{
"epoch": 0.8928571428571429,
"grad_norm": 1.7781994865228183,
"learning_rate": 4.293246740857364e-06,
"loss": 0.491,
"step": 150
},
{
"epoch": 0.9523809523809523,
"grad_norm": 1.442019716847258,
"learning_rate": 4.1823801090819264e-06,
"loss": 0.4858,
"step": 160
},
{
"epoch": 1.0,
"eval_loss": 0.06062576174736023,
"eval_runtime": 118.4325,
"eval_samples_per_second": 152.855,
"eval_steps_per_second": 0.599,
"step": 168
},
{
"epoch": 1.0119047619047619,
"grad_norm": 1.9573748846725199,
"learning_rate": 4.065328389117115e-06,
"loss": 0.4764,
"step": 170
},
{
"epoch": 1.0714285714285714,
"grad_norm": 2.4665695528371394,
"learning_rate": 3.9425970160818e-06,
"loss": 0.437,
"step": 180
},
{
"epoch": 1.130952380952381,
"grad_norm": 2.159693770772968,
"learning_rate": 3.814715950117856e-06,
"loss": 0.4342,
"step": 190
},
{
"epoch": 1.1904761904761905,
"grad_norm": 2.623424180296988,
"learning_rate": 3.6822373879960433e-06,
"loss": 0.4336,
"step": 200
},
{
"epoch": 1.25,
"grad_norm": 2.306273952224096,
"learning_rate": 3.5457333787030318e-06,
"loss": 0.4363,
"step": 210
},
{
"epoch": 1.3095238095238095,
"grad_norm": 2.347236799667969,
"learning_rate": 3.4057933533055654e-06,
"loss": 0.4311,
"step": 220
},
{
"epoch": 1.369047619047619,
"grad_norm": 1.9750374738964078,
"learning_rate": 3.263021579757952e-06,
"loss": 0.4306,
"step": 230
},
{
"epoch": 1.4285714285714286,
"grad_norm": 1.6122977161867083,
"learning_rate": 3.118034553643137e-06,
"loss": 0.4294,
"step": 240
},
{
"epoch": 1.4880952380952381,
"grad_norm": 1.4916294701985282,
"learning_rate": 2.971458336114263e-06,
"loss": 0.427,
"step": 250
},
{
"epoch": 1.5476190476190477,
"grad_norm": 1.6880715005839675,
"learning_rate": 2.8239258505316425e-06,
"loss": 0.4263,
"step": 260
},
{
"epoch": 1.6071428571428572,
"grad_norm": 1.9685917922530944,
"learning_rate": 2.676074149468358e-06,
"loss": 0.4248,
"step": 270
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.9400247213989275,
"learning_rate": 2.5285416638857373e-06,
"loss": 0.4253,
"step": 280
},
{
"epoch": 1.7261904761904763,
"grad_norm": 1.7940899648128694,
"learning_rate": 2.3819654463568643e-06,
"loss": 0.4252,
"step": 290
},
{
"epoch": 1.7857142857142856,
"grad_norm": 1.7952795834561055,
"learning_rate": 2.236978420242048e-06,
"loss": 0.4205,
"step": 300
},
{
"epoch": 1.8452380952380953,
"grad_norm": 1.9405074077635567,
"learning_rate": 2.0942066466944353e-06,
"loss": 0.4198,
"step": 310
},
{
"epoch": 1.9047619047619047,
"grad_norm": 2.319401680615803,
"learning_rate": 1.9542666212969685e-06,
"loss": 0.4195,
"step": 320
},
{
"epoch": 1.9642857142857144,
"grad_norm": 1.3914546183519685,
"learning_rate": 1.817762612003957e-06,
"loss": 0.4198,
"step": 330
},
{
"epoch": 2.0,
"eval_loss": 0.05766107514500618,
"eval_runtime": 116.1152,
"eval_samples_per_second": 155.906,
"eval_steps_per_second": 0.611,
"step": 336
},
{
"epoch": 2.0238095238095237,
"grad_norm": 1.9311450987509786,
"learning_rate": 1.6852840498821444e-06,
"loss": 0.4022,
"step": 340
},
{
"epoch": 2.0833333333333335,
"grad_norm": 1.337234001678788,
"learning_rate": 1.5574029839182003e-06,
"loss": 0.3793,
"step": 350
},
{
"epoch": 2.142857142857143,
"grad_norm": 1.535897033938721,
"learning_rate": 1.4346716108828857e-06,
"loss": 0.3754,
"step": 360
},
{
"epoch": 2.2023809523809526,
"grad_norm": 1.5338815260091754,
"learning_rate": 1.317619890918075e-06,
"loss": 0.3752,
"step": 370
},
{
"epoch": 2.261904761904762,
"grad_norm": 1.2655478949407772,
"learning_rate": 1.2067532591426361e-06,
"loss": 0.3781,
"step": 380
},
{
"epoch": 2.3214285714285716,
"grad_norm": 0.9274508382487906,
"learning_rate": 1.1025504431585827e-06,
"loss": 0.3747,
"step": 390
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.800409494055116,
"learning_rate": 1.0054613958815923e-06,
"loss": 0.3739,
"step": 400
},
{
"epoch": 2.4404761904761907,
"grad_norm": 0.7731524413680763,
"learning_rate": 9.159053526220585e-07,
"loss": 0.3739,
"step": 410
},
{
"epoch": 2.5,
"grad_norm": 0.7412568103254032,
"learning_rate": 8.342690208062692e-07,
"loss": 0.3742,
"step": 420
},
{
"epoch": 2.5595238095238093,
"grad_norm": 0.7413769804004414,
"learning_rate": 7.609049101546112e-07,
"loss": 0.3756,
"step": 430
},
{
"epoch": 2.619047619047619,
"grad_norm": 0.7818994788408196,
"learning_rate": 6.961298105271477e-07,
"loss": 0.3763,
"step": 440
},
{
"epoch": 2.678571428571429,
"grad_norm": 0.8385062252292741,
"learning_rate": 6.40223424009332e-07,
"loss": 0.3745,
"step": 450
},
{
"epoch": 2.738095238095238,
"grad_norm": 0.8169035396439738,
"learning_rate": 5.9342715714456e-07,
"loss": 0.3754,
"step": 460
},
{
"epoch": 2.7976190476190474,
"grad_norm": 0.8039460348032803,
"learning_rate": 5.559430785287691e-07,
"loss": 0.3745,
"step": 470
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.7337994863983218,
"learning_rate": 5.27933046268245e-07,
"loss": 0.3739,
"step": 480
},
{
"epoch": 2.9166666666666665,
"grad_norm": 0.7009497476164986,
"learning_rate": 5.095180090683062e-07,
"loss": 0.3713,
"step": 490
},
{
"epoch": 2.9761904761904763,
"grad_norm": 0.709649576111715,
"learning_rate": 5.007774839708124e-07,
"loss": 0.3732,
"step": 500
},
{
"epoch": 3.0,
"eval_loss": 0.057630255818367004,
"eval_runtime": 114.7419,
"eval_samples_per_second": 157.771,
"eval_steps_per_second": 0.619,
"step": 504
},
{
"epoch": 3.0,
"step": 504,
"total_flos": 3376037568184320.0,
"train_loss": 0.453020451560853,
"train_runtime": 17002.7178,
"train_samples_per_second": 60.688,
"train_steps_per_second": 0.03
}
],
"logging_steps": 10,
"max_steps": 504,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3376037568184320.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}