{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09269162210338681,
  "eval_steps": 52,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030897207367795603,
      "grad_norm": 121869.7109375,
      "learning_rate": 4.0000000000000004e-11,
      "loss": 28.5226,
      "step": 1
    },
    {
      "epoch": 0.006179441473559121,
      "grad_norm": 118959.8046875,
      "learning_rate": 8.000000000000001e-11,
      "loss": 28.0073,
      "step": 2
    },
    {
      "epoch": 0.009269162210338681,
      "grad_norm": 126135.03125,
      "learning_rate": 1.2e-10,
      "loss": 27.1627,
      "step": 3
    },
    {
      "epoch": 0.012358882947118241,
      "grad_norm": 112884.65625,
      "learning_rate": 1.6000000000000002e-10,
      "loss": 28.973,
      "step": 4
    },
    {
      "epoch": 0.015448603683897801,
      "grad_norm": 122128.5390625,
      "learning_rate": 2e-10,
      "loss": 27.385,
      "step": 5
    },
    {
      "epoch": 0.018538324420677363,
      "grad_norm": 122594.34375,
      "learning_rate": 1.9977668786231533e-10,
      "loss": 26.5713,
      "step": 6
    },
    {
      "epoch": 0.02162804515745692,
      "grad_norm": 123062.546875,
      "learning_rate": 1.9910774881547801e-10,
      "loss": 27.2005,
      "step": 7
    },
    {
      "epoch": 0.024717765894236483,
      "grad_norm": 124900.0078125,
      "learning_rate": 1.9799617050365868e-10,
      "loss": 26.7775,
      "step": 8
    },
    {
      "epoch": 0.027807486631016044,
      "grad_norm": 123474.9375,
      "learning_rate": 1.9644691750543768e-10,
      "loss": 26.5014,
      "step": 9
    },
    {
      "epoch": 0.030897207367795602,
      "grad_norm": 136878.453125,
      "learning_rate": 1.9446690916079189e-10,
      "loss": 23.5287,
      "step": 10
    },
    {
      "epoch": 0.03398692810457516,
      "grad_norm": 123598.8671875,
      "learning_rate": 1.9206498866764287e-10,
      "loss": 25.913,
      "step": 11
    },
    {
      "epoch": 0.037076648841354726,
      "grad_norm": 103480.9375,
      "learning_rate": 1.8925188358598814e-10,
      "loss": 30.122,
      "step": 12
    },
    {
      "epoch": 0.040166369578134284,
      "grad_norm": 132586.203125,
      "learning_rate": 1.8604015792601395e-10,
      "loss": 25.5228,
      "step": 13
    },
    {
      "epoch": 0.04325609031491384,
      "grad_norm": 122334.4375,
      "learning_rate": 1.8244415603417604e-10,
      "loss": 26.4373,
      "step": 14
    },
    {
      "epoch": 0.04634581105169341,
      "grad_norm": 125392.0390625,
      "learning_rate": 1.784799385278661e-10,
      "loss": 27.6052,
      "step": 15
    },
    {
      "epoch": 0.049435531788472965,
      "grad_norm": 130922.2578125,
      "learning_rate": 1.7416521056479579e-10,
      "loss": 24.9613,
      "step": 16
    },
    {
      "epoch": 0.052525252525252523,
      "grad_norm": 135075.28125,
      "learning_rate": 1.6951924276746424e-10,
      "loss": 25.3005,
      "step": 17
    },
    {
      "epoch": 0.05561497326203209,
      "grad_norm": 112619.53125,
      "learning_rate": 1.6456278515588024e-10,
      "loss": 28.5963,
      "step": 18
    },
    {
      "epoch": 0.05870469399881165,
      "grad_norm": 124712.125,
      "learning_rate": 1.5931797447293553e-10,
      "loss": 27.1466,
      "step": 19
    },
    {
      "epoch": 0.061794414735591205,
      "grad_norm": 126235.96875,
      "learning_rate": 1.5380823531633727e-10,
      "loss": 25.7108,
      "step": 20
    },
    {
      "epoch": 0.06488413547237076,
      "grad_norm": 121149.0625,
      "learning_rate": 1.480581755186684e-10,
      "loss": 27.9495,
      "step": 21
    },
    {
      "epoch": 0.06797385620915032,
      "grad_norm": 126460.7265625,
      "learning_rate": 1.4209347624283351e-10,
      "loss": 26.7249,
      "step": 22
    },
    {
      "epoch": 0.0710635769459299,
      "grad_norm": 119974.6484375,
      "learning_rate": 1.3594077728375128e-10,
      "loss": 26.7696,
      "step": 23
    },
    {
      "epoch": 0.07415329768270945,
      "grad_norm": 118920.1875,
      "learning_rate": 1.296275580885634e-10,
      "loss": 26.7094,
      "step": 24
    },
    {
      "epoch": 0.07724301841948901,
      "grad_norm": 132117.25,
      "learning_rate": 1.2318201502675284e-10,
      "loss": 26.2662,
      "step": 25
    },
    {
      "epoch": 0.08033273915626857,
      "grad_norm": 130024.0859375,
      "learning_rate": 1.1663293545831302e-10,
      "loss": 27.342,
      "step": 26
    },
    {
      "epoch": 0.08342245989304813,
      "grad_norm": 122062.3984375,
      "learning_rate": 1.1000956916240985e-10,
      "loss": 26.5586,
      "step": 27
    },
    {
      "epoch": 0.08651218062982768,
      "grad_norm": 123341.65625,
      "learning_rate": 1.0334149770076747e-10,
      "loss": 27.7682,
      "step": 28
    },
    {
      "epoch": 0.08960190136660724,
      "grad_norm": 121228.375,
      "learning_rate": 9.665850229923258e-11,
      "loss": 26.549,
      "step": 29
    },
    {
      "epoch": 0.09269162210338681,
      "grad_norm": 117187.109375,
      "learning_rate": 8.999043083759017e-11,
      "loss": 28.5272,
      "step": 30
    }
  ],
  "logging_steps": 1,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.978137921748992e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}