{
  "best_metric": 0.42575815320014954,
  "best_model_checkpoint": "speecht5_finetuned_bphc/checkpoint-500",
  "epoch": 2.6595744680851063,
  "eval_steps": 100,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13297872340425532,
      "grad_norm": 3.7797725200653076,
      "learning_rate": 2.4e-05,
      "loss": 1.0695,
      "step": 25
    },
    {
      "epoch": 0.26595744680851063,
      "grad_norm": 4.549177169799805,
      "learning_rate": 4.9e-05,
      "loss": 0.7576,
      "step": 50
    },
    {
      "epoch": 0.39893617021276595,
      "grad_norm": 4.847335338592529,
      "learning_rate": 7.2e-05,
      "loss": 0.6571,
      "step": 75
    },
    {
      "epoch": 0.5319148936170213,
      "grad_norm": 8.47089958190918,
      "learning_rate": 9.7e-05,
      "loss": 0.624,
      "step": 100
    },
    {
      "epoch": 0.5319148936170213,
      "eval_loss": 0.5379713177680969,
      "eval_runtime": 18.8881,
      "eval_samples_per_second": 35.419,
      "eval_steps_per_second": 17.736,
      "step": 100
    },
    {
      "epoch": 0.6648936170212766,
      "grad_norm": 3.0278241634368896,
      "learning_rate": 9.449999999999999e-05,
      "loss": 0.595,
      "step": 125
    },
    {
      "epoch": 0.7978723404255319,
      "grad_norm": 4.422701358795166,
      "learning_rate": 8.825e-05,
      "loss": 0.5809,
      "step": 150
    },
    {
      "epoch": 0.9308510638297872,
      "grad_norm": 4.076521396636963,
      "learning_rate": 8.2e-05,
      "loss": 0.5634,
      "step": 175
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": 5.123497009277344,
      "learning_rate": 7.575e-05,
      "loss": 0.5563,
      "step": 200
    },
    {
      "epoch": 1.0638297872340425,
      "eval_loss": 0.5066816210746765,
      "eval_runtime": 18.5524,
      "eval_samples_per_second": 36.06,
      "eval_steps_per_second": 18.057,
      "step": 200
    },
    {
      "epoch": 1.196808510638298,
      "grad_norm": 17.09893226623535,
      "learning_rate": 6.95e-05,
      "loss": 0.5311,
      "step": 225
    },
    {
      "epoch": 1.3297872340425532,
      "grad_norm": 2.976322889328003,
      "learning_rate": 6.324999999999999e-05,
      "loss": 0.5287,
      "step": 250
    },
    {
      "epoch": 1.4627659574468086,
      "grad_norm": 2.6360063552856445,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 0.5182,
      "step": 275
    },
    {
      "epoch": 1.5957446808510638,
      "grad_norm": 2.318189859390259,
      "learning_rate": 5.075e-05,
      "loss": 0.5121,
      "step": 300
    },
    {
      "epoch": 1.5957446808510638,
      "eval_loss": 0.46897533535957336,
      "eval_runtime": 18.3638,
      "eval_samples_per_second": 36.43,
      "eval_steps_per_second": 18.242,
      "step": 300
    },
    {
      "epoch": 1.728723404255319,
      "grad_norm": 4.144455909729004,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.5006,
      "step": 325
    },
    {
      "epoch": 1.8617021276595744,
      "grad_norm": 3.1114983558654785,
      "learning_rate": 3.825e-05,
      "loss": 0.493,
      "step": 350
    },
    {
      "epoch": 1.9946808510638299,
      "grad_norm": 4.484862804412842,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.4862,
      "step": 375
    },
    {
      "epoch": 2.127659574468085,
      "grad_norm": 7.771288871765137,
      "learning_rate": 2.5750000000000002e-05,
      "loss": 0.4795,
      "step": 400
    },
    {
      "epoch": 2.127659574468085,
      "eval_loss": 0.4404144883155823,
      "eval_runtime": 18.5295,
      "eval_samples_per_second": 36.105,
      "eval_steps_per_second": 18.079,
      "step": 400
    },
    {
      "epoch": 2.2606382978723403,
      "grad_norm": 3.972978115081787,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 0.478,
      "step": 425
    },
    {
      "epoch": 2.393617021276596,
      "grad_norm": 4.415282726287842,
      "learning_rate": 1.3250000000000002e-05,
      "loss": 0.4722,
      "step": 450
    },
    {
      "epoch": 2.526595744680851,
      "grad_norm": 3.901869773864746,
      "learning_rate": 7.000000000000001e-06,
      "loss": 0.4623,
      "step": 475
    },
    {
      "epoch": 2.6595744680851063,
      "grad_norm": 6.222782135009766,
      "learning_rate": 7.5e-07,
      "loss": 0.4622,
      "step": 500
    },
    {
      "epoch": 2.6595744680851063,
      "eval_loss": 0.42575815320014954,
      "eval_runtime": 18.6784,
      "eval_samples_per_second": 35.817,
      "eval_steps_per_second": 17.935,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1478228411981376.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}