{
  "best_metric": 2.3011960983276367,
  "best_model_checkpoint": "output/rammstein/checkpoint-96",
  "epoch": 2.0,
  "global_step": 96,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 3.64059311663575e-06,
      "loss": 2.9482,
      "step": 5
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.4175960856021253e-05,
      "loss": 2.7279,
      "step": 10
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.048788201485529e-05,
      "loss": 3.0408,
      "step": 15
    },
    {
      "epoch": 0.42,
      "learning_rate": 5.084501350596709e-05,
      "loss": 2.8174,
      "step": 20
    },
    {
      "epoch": 0.52,
      "learning_rate": 7.308665466518777e-05,
      "loss": 2.9266,
      "step": 25
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.485208346024518e-05,
      "loss": 2.7754,
      "step": 30
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00011383112291586474,
      "loss": 2.4825,
      "step": 35
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00012800934269961248,
      "loss": 2.6286,
      "step": 40
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001358818702356616,
      "loss": 2.681,
      "step": 45
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.4657435417175293,
      "eval_runtime": 0.7344,
      "eval_samples_per_second": 81.697,
      "eval_steps_per_second": 10.893,
      "step": 48
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001366131174902434,
      "loss": 2.5211,
      "step": 50
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0001301254700691424,
      "loss": 2.3708,
      "step": 55
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00011710752518939717,
      "loss": 2.4006,
      "step": 60
    },
    {
      "epoch": 1.35,
      "learning_rate": 9.894100414902355e-05,
      "loss": 2.3993,
      "step": 65
    },
    {
      "epoch": 1.46,
      "learning_rate": 7.755409678629555e-05,
      "loss": 2.4798,
      "step": 70
    },
    {
      "epoch": 1.56,
      "learning_rate": 5.5216803909693664e-05,
      "loss": 2.4973,
      "step": 75
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.4300000000000054e-05,
      "loss": 2.2237,
      "step": 80
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.7023789206942107e-05,
      "loss": 2.2823,
      "step": 85
    },
    {
      "epoch": 1.88,
      "learning_rate": 5.221864069725753e-06,
      "loss": 2.2183,
      "step": 90
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.4687786583180135e-07,
      "loss": 2.2238,
      "step": 95
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.3011960983276367,
      "eval_runtime": 0.7368,
      "eval_samples_per_second": 81.434,
      "eval_steps_per_second": 10.858,
      "step": 96
    }
  ],
  "max_steps": 96,
  "num_train_epochs": 2,
  "total_flos": 99552264192000.0,
  "trial_name": null,
  "trial_params": null
}