{
  "best_metric": 35.363951655281134,
  "best_model_checkpoint": "/models/hfhub/whisper-tiny-ft-ccv-en-cy/checkpoint-4000",
  "epoch": 1.005593614480548,
  "eval_steps": 1000,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 5e-06,
      "loss": 1.8194,
      "step": 250
    },
    {
      "epoch": 0.13,
      "learning_rate": 1e-05,
      "loss": 0.9849,
      "step": 500
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.7915,
      "step": 750
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.7059,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.7390016913414001,
      "eval_runtime": 4754.2409,
      "eval_samples_per_second": 5.517,
      "eval_steps_per_second": 0.69,
      "eval_wer": 44.49101418812402,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.6376,
      "step": 1250
    },
    {
      "epoch": 0.38,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.6106,
      "step": 1500
    },
    {
      "epoch": 0.44,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.5788,
      "step": 1750
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.5534,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.6115821003913879,
      "eval_runtime": 4753.0136,
      "eval_samples_per_second": 5.519,
      "eval_steps_per_second": 0.69,
      "eval_wer": 39.21681555438781,
      "step": 2000
    },
    {
      "epoch": 0.57,
      "learning_rate": 5e-06,
      "loss": 0.5487,
      "step": 2250
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.5209,
      "step": 2500
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.5085,
      "step": 2750
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.5087,
      "step": 3000
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.5614925622940063,
      "eval_runtime": 4551.9019,
      "eval_samples_per_second": 5.763,
      "eval_steps_per_second": 0.72,
      "eval_wer": 36.01933788754598,
      "step": 3000
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.493,
      "step": 3250
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.4827,
      "step": 3500
    },
    {
      "epoch": 0.94,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.4841,
      "step": 3750
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0,
      "loss": 0.4749,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.5461583137512207,
      "eval_runtime": 4551.8069,
      "eval_samples_per_second": 5.763,
      "eval_steps_per_second": 0.72,
      "eval_wer": 35.363951655281134,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "step": 4000,
      "total_flos": 3.15119267647488e+18,
      "train_loss": 0.6689756336212158,
      "train_runtime": 34976.9315,
      "train_samples_per_second": 3.66,
      "train_steps_per_second": 0.114
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.5461583137512207,
      "eval_runtime": 4567.2196,
      "eval_samples_per_second": 5.743,
      "eval_steps_per_second": 0.718,
      "eval_wer": 35.363951655281134,
      "step": 4000
    }
  ],
  "logging_steps": 250,
  "max_steps": 4000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 3.15119267647488e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}