{
  "best_metric": 0.47801004237740824,
  "best_model_checkpoint": "./whisper-tiny-ckb/checkpoint-600",
  "epoch": 4.3478260869565215,
  "eval_steps": 100,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 2.6164804129562553e-06,
      "loss": 3.7615,
      "step": 10
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.980934789368156e-06,
      "loss": 2.8759,
      "step": 20
    },
    {
      "epoch": 0.22,
      "learning_rate": 5.99823228863792e-06,
      "loss": 2.0115,
      "step": 30
    },
    {
      "epoch": 0.29,
      "learning_rate": 6.655623437887147e-06,
      "loss": 1.4273,
      "step": 40
    },
    {
      "epoch": 0.36,
      "learning_rate": 7.1422479480192775e-06,
      "loss": 1.0629,
      "step": 50
    },
    {
      "epoch": 0.43,
      "learning_rate": 7.528775214088733e-06,
      "loss": 0.864,
      "step": 60
    },
    {
      "epoch": 0.51,
      "learning_rate": 7.849441238868767e-06,
      "loss": 0.69,
      "step": 70
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.123456553166724e-06,
      "loss": 0.5759,
      "step": 80
    },
    {
      "epoch": 0.65,
      "learning_rate": 8.36268666504982e-06,
      "loss": 0.5048,
      "step": 90
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.574976672697987e-06,
      "loss": 0.4502,
      "step": 100
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.498779296875,
      "eval_runtime": 154.3428,
      "eval_samples_per_second": 32.007,
      "eval_steps_per_second": 0.065,
      "eval_wer": 0.7165699227235497,
      "step": 100
    },
    {
      "epoch": 0.8,
      "learning_rate": 8.765784640355151e-06,
      "loss": 0.4391,
      "step": 110
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.93906144297322e-06,
      "loss": 0.4092,
      "step": 120
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.09775921698996e-06,
      "loss": 0.386,
      "step": 130
    },
    {
      "epoch": 1.01,
      "learning_rate": 9.244141981517345e-06,
      "loss": 0.3711,
      "step": 140
    },
    {
      "epoch": 1.09,
      "learning_rate": 9.379984164319582e-06,
      "loss": 0.3478,
      "step": 150
    },
    {
      "epoch": 1.16,
      "learning_rate": 9.50670232443118e-06,
      "loss": 0.3501,
      "step": 160
    },
    {
      "epoch": 1.23,
      "learning_rate": 9.625445353695127e-06,
      "loss": 0.3344,
      "step": 170
    },
    {
      "epoch": 1.3,
      "learning_rate": 9.73715793581418e-06,
      "loss": 0.3167,
      "step": 180
    },
    {
      "epoch": 1.38,
      "learning_rate": 9.842626246843015e-06,
      "loss": 0.3082,
      "step": 190
    },
    {
      "epoch": 1.45,
      "learning_rate": 9.942511545878664e-06,
      "loss": 0.2977,
      "step": 200
    },
    {
      "epoch": 1.45,
      "eval_loss": 0.355712890625,
      "eval_runtime": 154.4865,
      "eval_samples_per_second": 31.977,
      "eval_steps_per_second": 0.065,
      "eval_wer": 0.5858765713471742,
      "step": 200
    },
    {
      "epoch": 1.52,
      "learning_rate": 9.925e-06,
      "loss": 0.3017,
      "step": 210
    },
    {
      "epoch": 1.59,
      "learning_rate": 9.675000000000001e-06,
      "loss": 0.2896,
      "step": 220
    },
    {
      "epoch": 1.67,
      "learning_rate": 9.425e-06,
      "loss": 0.2844,
      "step": 230
    },
    {
      "epoch": 1.74,
      "learning_rate": 9.175000000000001e-06,
      "loss": 0.2729,
      "step": 240
    },
    {
      "epoch": 1.81,
      "learning_rate": 8.925e-06,
      "loss": 0.2675,
      "step": 250
    },
    {
      "epoch": 1.88,
      "learning_rate": 8.675e-06,
      "loss": 0.2796,
      "step": 260
    },
    {
      "epoch": 1.96,
      "learning_rate": 8.425000000000001e-06,
      "loss": 0.2726,
      "step": 270
    },
    {
      "epoch": 2.03,
      "learning_rate": 8.175e-06,
      "loss": 0.2623,
      "step": 280
    },
    {
      "epoch": 2.1,
      "learning_rate": 7.925000000000001e-06,
      "loss": 0.2456,
      "step": 290
    },
    {
      "epoch": 2.17,
      "learning_rate": 7.675e-06,
      "loss": 0.2494,
      "step": 300
    },
    {
      "epoch": 2.17,
      "eval_loss": 0.3095703125,
      "eval_runtime": 165.9821,
      "eval_samples_per_second": 29.762,
      "eval_steps_per_second": 0.06,
      "eval_wer": 0.5314625547523236,
      "step": 300
    },
    {
      "epoch": 2.25,
      "learning_rate": 7.425000000000001e-06,
      "loss": 0.2531,
      "step": 310
    },
    {
      "epoch": 2.32,
      "learning_rate": 7.175000000000001e-06,
      "loss": 0.2496,
      "step": 320
    },
    {
      "epoch": 2.39,
      "learning_rate": 6.925000000000001e-06,
      "loss": 0.2421,
      "step": 330
    },
    {
      "epoch": 2.46,
      "learning_rate": 6.6750000000000005e-06,
      "loss": 0.2337,
      "step": 340
    },
    {
      "epoch": 2.54,
      "learning_rate": 6.425e-06,
      "loss": 0.2358,
      "step": 350
    },
    {
      "epoch": 2.61,
      "learning_rate": 6.175000000000001e-06,
      "loss": 0.2433,
      "step": 360
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.925000000000001e-06,
      "loss": 0.2335,
      "step": 370
    },
    {
      "epoch": 2.75,
      "learning_rate": 5.675000000000001e-06,
      "loss": 0.2301,
      "step": 380
    },
    {
      "epoch": 2.83,
      "learning_rate": 5.4250000000000006e-06,
      "loss": 0.2223,
      "step": 390
    },
    {
      "epoch": 2.9,
      "learning_rate": 5.1750000000000004e-06,
      "loss": 0.2224,
      "step": 400
    },
    {
      "epoch": 2.9,
      "eval_loss": 0.28173828125,
      "eval_runtime": 152.093,
      "eval_samples_per_second": 32.48,
      "eval_steps_per_second": 0.066,
      "eval_wer": 0.5008012535166126,
      "step": 400
    },
    {
      "epoch": 2.97,
      "learning_rate": 4.925e-06,
      "loss": 0.2293,
      "step": 410
    },
    {
      "epoch": 3.04,
      "learning_rate": 4.675000000000001e-06,
      "loss": 0.2255,
      "step": 420
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.425e-06,
      "loss": 0.2131,
      "step": 430
    },
    {
      "epoch": 3.19,
      "learning_rate": 4.175e-06,
      "loss": 0.2106,
      "step": 440
    },
    {
      "epoch": 3.26,
      "learning_rate": 3.9250000000000005e-06,
      "loss": 0.2176,
      "step": 450
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.6750000000000004e-06,
      "loss": 0.22,
      "step": 460
    },
    {
      "epoch": 3.41,
      "learning_rate": 3.4250000000000007e-06,
      "loss": 0.2151,
      "step": 470
    },
    {
      "epoch": 3.48,
      "learning_rate": 3.175e-06,
      "loss": 0.2082,
      "step": 480
    },
    {
      "epoch": 3.55,
      "learning_rate": 2.925e-06,
      "loss": 0.2014,
      "step": 490
    },
    {
      "epoch": 3.62,
      "learning_rate": 2.6750000000000002e-06,
      "loss": 0.2148,
      "step": 500
    },
    {
      "epoch": 3.62,
      "eval_loss": 0.2666015625,
      "eval_runtime": 152.4154,
      "eval_samples_per_second": 32.411,
      "eval_steps_per_second": 0.066,
      "eval_wer": 0.481927281791959,
      "step": 500
    },
    {
      "epoch": 3.7,
      "learning_rate": 2.425e-06,
      "loss": 0.2158,
      "step": 510
    },
    {
      "epoch": 3.77,
      "learning_rate": 2.1750000000000004e-06,
      "loss": 0.2103,
      "step": 520
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.925e-06,
      "loss": 0.2091,
      "step": 530
    },
    {
      "epoch": 3.91,
      "learning_rate": 1.6750000000000003e-06,
      "loss": 0.1998,
      "step": 540
    },
    {
      "epoch": 3.99,
      "learning_rate": 1.425e-06,
      "loss": 0.2068,
      "step": 550
    },
    {
      "epoch": 4.06,
      "learning_rate": 1.175e-06,
      "loss": 0.212,
      "step": 560
    },
    {
      "epoch": 4.13,
      "learning_rate": 9.25e-07,
      "loss": 0.2034,
      "step": 570
    },
    {
      "epoch": 4.2,
      "learning_rate": 6.750000000000001e-07,
      "loss": 0.2013,
      "step": 580
    },
    {
      "epoch": 4.28,
      "learning_rate": 4.2500000000000006e-07,
      "loss": 0.1919,
      "step": 590
    },
    {
      "epoch": 4.35,
      "learning_rate": 1.7500000000000002e-07,
      "loss": 0.2096,
      "step": 600
    },
    {
      "epoch": 4.35,
      "eval_loss": 0.26123046875,
      "eval_runtime": 154.28,
      "eval_samples_per_second": 32.02,
      "eval_steps_per_second": 0.065,
      "eval_wer": 0.47801004237740824,
      "step": 600
    },
    {
      "epoch": 4.35,
      "step": 600,
      "total_flos": 1.1344381987478569e+19,
      "train_loss": 0.4515265909830729,
      "train_runtime": 14971.1378,
      "train_samples_per_second": 30.779,
      "train_steps_per_second": 0.04
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 1.1344381987478569e+19,
  "train_batch_size": 192,
  "trial_name": null,
  "trial_params": null
}