{
  "best_metric": 18.70514087587693,
  "best_model_checkpoint": "/models/hfhub/whisper-large-v3-ft-cy/checkpoint-3000",
  "epoch": 1.005593614480548,
  "eval_steps": 1000,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 5e-06,
      "loss": 0.4589,
      "step": 250
    },
    {
      "epoch": 0.13,
      "learning_rate": 1e-05,
      "loss": 0.2548,
      "step": 500
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.2358,
      "step": 750
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.2078,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.21982243657112122,
      "eval_runtime": 30133.785,
      "eval_samples_per_second": 0.87,
      "eval_steps_per_second": 0.109,
      "eval_wer": 28.755609922068665,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.1921,
      "step": 1250
    },
    {
      "epoch": 0.38,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.1803,
      "step": 1500
    },
    {
      "epoch": 0.44,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.1714,
      "step": 1750
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.1623,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.1799924671649933,
      "eval_runtime": 31181.9539,
      "eval_samples_per_second": 0.841,
      "eval_steps_per_second": 0.105,
      "eval_wer": 31.369793101213528,
      "step": 2000
    },
    {
      "epoch": 0.57,
      "learning_rate": 5e-06,
      "loss": 0.1592,
      "step": 2250
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.1451,
      "step": 2500
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.1447,
      "step": 2750
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.1417,
      "step": 3000
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.15845398604869843,
      "eval_runtime": 32690.2238,
      "eval_samples_per_second": 0.802,
      "eval_steps_per_second": 0.1,
      "eval_wer": 18.70514087587693,
      "step": 3000
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.1342,
      "step": 3250
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.1315,
      "step": 3500
    },
    {
      "epoch": 0.94,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.1285,
      "step": 3750
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0,
      "loss": 0.1188,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.14796172082424164,
      "eval_runtime": 32177.5808,
      "eval_samples_per_second": 0.815,
      "eval_steps_per_second": 0.102,
      "eval_wer": 25.134071061713193,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "step": 4000,
      "total_flos": 4.3487638574137344e+20,
      "train_loss": 0.18544992589950562,
      "train_runtime": 248926.3333,
      "train_samples_per_second": 0.514,
      "train_steps_per_second": 0.016
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.14796172082424164,
      "eval_runtime": 30969.981,
      "eval_samples_per_second": 0.847,
      "eval_steps_per_second": 0.106,
      "eval_wer": 25.134071061713193,
      "step": 4000
    }
  ],
  "logging_steps": 250,
  "max_steps": 4000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 4.3487638574137344e+20,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}