{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12358882947118241,
  "eval_steps": 62,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030897207367795603,
      "grad_norm": 122078.609375,
      "learning_rate": 4.0000000000000007e-10,
      "loss": 28.8098,
      "step": 1
    },
    {
      "epoch": 0.006179441473559121,
      "grad_norm": 119212.0546875,
      "learning_rate": 8.000000000000001e-10,
      "loss": 28.2858,
      "step": 2
    },
    {
      "epoch": 0.009269162210338681,
      "grad_norm": 126451.015625,
      "learning_rate": 1.2e-09,
      "loss": 27.4565,
      "step": 3
    },
    {
      "epoch": 0.012358882947118241,
      "grad_norm": 113181.8359375,
      "learning_rate": 1.6000000000000003e-09,
      "loss": 29.2436,
      "step": 4
    },
    {
      "epoch": 0.015448603683897801,
      "grad_norm": 122404.765625,
      "learning_rate": 2e-09,
      "loss": 27.6679,
      "step": 5
    },
    {
      "epoch": 0.018538324420677363,
      "grad_norm": 122852.390625,
      "learning_rate": 1.9984815164333164e-09,
      "loss": 26.8483,
      "step": 6
    },
    {
      "epoch": 0.02162804515745692,
      "grad_norm": 123315.046875,
      "learning_rate": 1.9939306773179497e-09,
      "loss": 27.4809,
      "step": 7
    },
    {
      "epoch": 0.024717765894236483,
      "grad_norm": 125134.7109375,
      "learning_rate": 1.9863613034027226e-09,
      "loss": 27.0587,
      "step": 8
    },
    {
      "epoch": 0.027807486631016044,
      "grad_norm": 123739.59375,
      "learning_rate": 1.9757963826274356e-09,
      "loss": 26.7539,
      "step": 9
    },
    {
      "epoch": 0.030897207367795602,
      "grad_norm": 137136.96875,
      "learning_rate": 1.9622680003092504e-09,
      "loss": 23.7809,
      "step": 10
    },
    {
      "epoch": 0.03398692810457516,
      "grad_norm": 123784.703125,
      "learning_rate": 1.9458172417006344e-09,
      "loss": 26.1442,
      "step": 11
    },
    {
      "epoch": 0.037076648841354726,
      "grad_norm": 103670.2421875,
      "learning_rate": 1.9264940672148016e-09,
      "loss": 30.3218,
      "step": 12
    },
    {
      "epoch": 0.040166369578134284,
      "grad_norm": 132834.09375,
      "learning_rate": 1.9043571606975775e-09,
      "loss": 25.762,
      "step": 13
    },
    {
      "epoch": 0.04325609031491384,
      "grad_norm": 122543.75,
      "learning_rate": 1.879473751206489e-09,
      "loss": 26.66,
      "step": 14
    },
    {
      "epoch": 0.04634581105169341,
      "grad_norm": 125605.171875,
      "learning_rate": 1.8519194088383272e-09,
      "loss": 27.83,
      "step": 15
    },
    {
      "epoch": 0.049435531788472965,
      "grad_norm": 131139.46875,
      "learning_rate": 1.8217778152252452e-09,
      "loss": 25.1872,
      "step": 16
    },
    {
      "epoch": 0.052525252525252523,
      "grad_norm": 135191.375,
      "learning_rate": 1.7891405093963938e-09,
      "loss": 25.44,
      "step": 17
    },
    {
      "epoch": 0.05561497326203209,
      "grad_norm": 112697.7890625,
      "learning_rate": 1.7541066097768964e-09,
      "loss": 28.6852,
      "step": 18
    },
    {
      "epoch": 0.05870469399881165,
      "grad_norm": 124782.5390625,
      "learning_rate": 1.7167825131684514e-09,
      "loss": 27.2324,
      "step": 19
    },
    {
      "epoch": 0.061794414735591205,
      "grad_norm": 126367.6875,
      "learning_rate": 1.6772815716257414e-09,
      "loss": 25.7919,
      "step": 20
    },
    {
      "epoch": 0.06488413547237076,
      "grad_norm": 121215.390625,
      "learning_rate": 1.6357237482099683e-09,
      "loss": 28.0235,
      "step": 21
    },
    {
      "epoch": 0.06797385620915032,
      "grad_norm": 126557.078125,
      "learning_rate": 1.5922352526649803e-09,
      "loss": 26.7966,
      "step": 22
    },
    {
      "epoch": 0.0710635769459299,
      "grad_norm": 120041.1953125,
      "learning_rate": 1.5469481581224272e-09,
      "loss": 26.8349,
      "step": 23
    },
    {
      "epoch": 0.07415329768270945,
      "grad_norm": 119010.8046875,
      "learning_rate": 1.5000000000000002e-09,
      "loss": 26.7727,
      "step": 24
    },
    {
      "epoch": 0.07724301841948901,
      "grad_norm": 132196.0,
      "learning_rate": 1.4515333583108895e-09,
      "loss": 26.3286,
      "step": 25
    },
    {
      "epoch": 0.08033273915626857,
      "grad_norm": 130052.84375,
      "learning_rate": 1.4016954246529696e-09,
      "loss": 27.3852,
      "step": 26
    },
    {
      "epoch": 0.08342245989304813,
      "grad_norm": 122084.9375,
      "learning_rate": 1.3506375551927546e-09,
      "loss": 26.589,
      "step": 27
    },
    {
      "epoch": 0.08651218062982768,
      "grad_norm": 123355.5234375,
      "learning_rate": 1.2985148110016946e-09,
      "loss": 27.7932,
      "step": 28
    },
    {
      "epoch": 0.08960190136660724,
      "grad_norm": 121250.8125,
      "learning_rate": 1.2454854871407993e-09,
      "loss": 26.5707,
      "step": 29
    },
    {
      "epoch": 0.09269162210338681,
      "grad_norm": 117199.703125,
      "learning_rate": 1.1917106319237385e-09,
      "loss": 28.5464,
      "step": 30
    },
    {
      "epoch": 0.09578134284016637,
      "grad_norm": 127383.734375,
      "learning_rate": 1.1373535578184083e-09,
      "loss": 27.1054,
      "step": 31
    },
    {
      "epoch": 0.09887106357694593,
      "grad_norm": 129232.5234375,
      "learning_rate": 1.0825793454723326e-09,
      "loss": 25.4971,
      "step": 32
    },
    {
      "epoch": 0.10196078431372549,
      "grad_norm": 116733.3984375,
      "learning_rate": 1.0275543423681622e-09,
      "loss": 29.6508,
      "step": 33
    },
    {
      "epoch": 0.10505050505050505,
      "grad_norm": 120443.4296875,
      "learning_rate": 9.724456576318381e-10,
      "loss": 27.152,
      "step": 34
    },
    {
      "epoch": 0.1081402257872846,
      "grad_norm": 118378.640625,
      "learning_rate": 9.174206545276678e-10,
      "loss": 28.7253,
      "step": 35
    },
    {
      "epoch": 0.11122994652406418,
      "grad_norm": 122584.96875,
      "learning_rate": 8.626464421815919e-10,
      "loss": 28.6023,
      "step": 36
    },
    {
      "epoch": 0.11431966726084374,
      "grad_norm": 126065.9453125,
      "learning_rate": 8.082893680762619e-10,
      "loss": 26.1743,
      "step": 37
    },
    {
      "epoch": 0.1174093879976233,
      "grad_norm": 132074.5,
      "learning_rate": 7.545145128592009e-10,
      "loss": 24.2718,
      "step": 38
    },
    {
      "epoch": 0.12049910873440285,
      "grad_norm": 121406.8515625,
      "learning_rate": 7.014851889983058e-10,
      "loss": 27.4691,
      "step": 39
    },
    {
      "epoch": 0.12358882947118241,
      "grad_norm": 117569.140625,
      "learning_rate": 6.493624448072458e-10,
      "loss": 28.1294,
      "step": 40
    }
  ],
  "logging_steps": 1,
  "max_steps": 62,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.637517228998656e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}