{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.042625745950554135,
  "eval_steps": 3,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0017050298380221654,
      "grad_norm": 1.6034964323043823,
      "learning_rate": 5e-05,
      "loss": 1.4885,
      "step": 1
    },
    {
      "epoch": 0.0017050298380221654,
      "eval_loss": 1.3765087127685547,
      "eval_runtime": 115.0675,
      "eval_samples_per_second": 6.44,
      "eval_steps_per_second": 1.616,
      "step": 1
    },
    {
      "epoch": 0.0034100596760443308,
      "grad_norm": 1.3994951248168945,
      "learning_rate": 0.0001,
      "loss": 1.639,
      "step": 2
    },
    {
      "epoch": 0.005115089514066497,
      "grad_norm": 1.1658259630203247,
      "learning_rate": 9.565217391304348e-05,
      "loss": 1.2317,
      "step": 3
    },
    {
      "epoch": 0.005115089514066497,
      "eval_loss": 1.2974878549575806,
      "eval_runtime": 115.0701,
      "eval_samples_per_second": 6.44,
      "eval_steps_per_second": 1.616,
      "step": 3
    },
    {
      "epoch": 0.0068201193520886615,
      "grad_norm": 4.647302150726318,
      "learning_rate": 9.130434782608696e-05,
      "loss": 2.0665,
      "step": 4
    },
    {
      "epoch": 0.008525149190110827,
      "grad_norm": 1.481662392616272,
      "learning_rate": 8.695652173913044e-05,
      "loss": 1.6932,
      "step": 5
    },
    {
      "epoch": 0.010230179028132993,
      "grad_norm": 3.079343795776367,
      "learning_rate": 8.260869565217392e-05,
      "loss": 1.7811,
      "step": 6
    },
    {
      "epoch": 0.010230179028132993,
      "eval_loss": 1.2517597675323486,
      "eval_runtime": 115.501,
      "eval_samples_per_second": 6.416,
      "eval_steps_per_second": 1.61,
      "step": 6
    },
    {
      "epoch": 0.011935208866155157,
      "grad_norm": 0.97172611951828,
      "learning_rate": 7.82608695652174e-05,
      "loss": 1.6069,
      "step": 7
    },
    {
      "epoch": 0.013640238704177323,
      "grad_norm": 0.934044599533081,
      "learning_rate": 7.391304347826086e-05,
      "loss": 1.2855,
      "step": 8
    },
    {
      "epoch": 0.015345268542199489,
      "grad_norm": 0.8091419339179993,
      "learning_rate": 6.956521739130436e-05,
      "loss": 1.1789,
      "step": 9
    },
    {
      "epoch": 0.015345268542199489,
      "eval_loss": 1.2307988405227661,
      "eval_runtime": 115.0172,
      "eval_samples_per_second": 6.443,
      "eval_steps_per_second": 1.617,
      "step": 9
    },
    {
      "epoch": 0.017050298380221655,
      "grad_norm": 0.9011499285697937,
      "learning_rate": 6.521739130434783e-05,
      "loss": 1.3867,
      "step": 10
    },
    {
      "epoch": 0.01875532821824382,
      "grad_norm": 0.7925646901130676,
      "learning_rate": 6.086956521739131e-05,
      "loss": 1.3662,
      "step": 11
    },
    {
      "epoch": 0.020460358056265986,
      "grad_norm": 0.7750657796859741,
      "learning_rate": 5.652173913043478e-05,
      "loss": 1.2866,
      "step": 12
    },
    {
      "epoch": 0.020460358056265986,
      "eval_loss": 1.2137305736541748,
      "eval_runtime": 115.3185,
      "eval_samples_per_second": 6.426,
      "eval_steps_per_second": 1.613,
      "step": 12
    },
    {
      "epoch": 0.02216538789428815,
      "grad_norm": 1.9830117225646973,
      "learning_rate": 5.217391304347826e-05,
      "loss": 1.6887,
      "step": 13
    },
    {
      "epoch": 0.023870417732310314,
      "grad_norm": 0.6523770093917847,
      "learning_rate": 4.782608695652174e-05,
      "loss": 1.3654,
      "step": 14
    },
    {
      "epoch": 0.02557544757033248,
      "grad_norm": 0.7408842444419861,
      "learning_rate": 4.347826086956522e-05,
      "loss": 1.344,
      "step": 15
    },
    {
      "epoch": 0.02557544757033248,
      "eval_loss": 1.2061489820480347,
      "eval_runtime": 115.3493,
      "eval_samples_per_second": 6.424,
      "eval_steps_per_second": 1.612,
      "step": 15
    },
    {
      "epoch": 0.027280477408354646,
      "grad_norm": 0.7093881964683533,
      "learning_rate": 3.91304347826087e-05,
      "loss": 1.3975,
      "step": 16
    },
    {
      "epoch": 0.028985507246376812,
      "grad_norm": 0.8234415054321289,
      "learning_rate": 3.478260869565218e-05,
      "loss": 1.1462,
      "step": 17
    },
    {
      "epoch": 0.030690537084398978,
      "grad_norm": 0.801357090473175,
      "learning_rate": 3.0434782608695656e-05,
      "loss": 1.4323,
      "step": 18
    },
    {
      "epoch": 0.030690537084398978,
      "eval_loss": 1.2006877660751343,
      "eval_runtime": 115.0416,
      "eval_samples_per_second": 6.441,
      "eval_steps_per_second": 1.617,
      "step": 18
    },
    {
      "epoch": 0.03239556692242114,
      "grad_norm": 0.6534079313278198,
      "learning_rate": 2.608695652173913e-05,
      "loss": 1.3427,
      "step": 19
    },
    {
      "epoch": 0.03410059676044331,
      "grad_norm": 0.6739923357963562,
      "learning_rate": 2.173913043478261e-05,
      "loss": 1.3338,
      "step": 20
    },
    {
      "epoch": 0.03580562659846547,
      "grad_norm": 0.7460229396820068,
      "learning_rate": 1.739130434782609e-05,
      "loss": 1.2776,
      "step": 21
    },
    {
      "epoch": 0.03580562659846547,
      "eval_loss": 1.1980022192001343,
      "eval_runtime": 115.3914,
      "eval_samples_per_second": 6.422,
      "eval_steps_per_second": 1.612,
      "step": 21
    },
    {
      "epoch": 0.03751065643648764,
      "grad_norm": 0.8048145771026611,
      "learning_rate": 1.3043478260869566e-05,
      "loss": 1.3784,
      "step": 22
    },
    {
      "epoch": 0.0392156862745098,
      "grad_norm": 0.7263695001602173,
      "learning_rate": 8.695652173913044e-06,
      "loss": 1.5492,
      "step": 23
    },
    {
      "epoch": 0.04092071611253197,
      "grad_norm": 0.693609356880188,
      "learning_rate": 4.347826086956522e-06,
      "loss": 1.2394,
      "step": 24
    },
    {
      "epoch": 0.04092071611253197,
      "eval_loss": 1.1968275308609009,
      "eval_runtime": 115.6679,
      "eval_samples_per_second": 6.406,
      "eval_steps_per_second": 1.608,
      "step": 24
    },
    {
      "epoch": 0.042625745950554135,
      "grad_norm": 0.8267568945884705,
      "learning_rate": 0.0,
      "loss": 1.0747,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6105266286166016e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|