{
  "best_metric": 0.26187804341316223,
  "best_model_checkpoint": "./checkpoint-6000",
  "epoch": 1.9998784982382245,
  "global_step": 6172,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 3.7499999999999997e-06,
      "loss": 12.1043,
      "step": 100
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.499999999999999e-06,
      "loss": 6.4771,
      "step": 200
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.1249999999999999e-05,
      "loss": 4.4866,
      "step": 300
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 3.8842,
      "step": 400
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.8712499999999997e-05,
      "loss": 3.495,
      "step": 500
    },
    {
      "epoch": 0.16,
      "eval_loss": 3.3882696628570557,
      "eval_runtime": 721.337,
      "eval_samples_per_second": 22.099,
      "eval_steps_per_second": 1.382,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 0.19,
      "learning_rate": 2.2462499999999997e-05,
      "loss": 3.171,
      "step": 600
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.6212499999999997e-05,
      "loss": 3.0275,
      "step": 700
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.99625e-05,
      "loss": 2.9681,
      "step": 800
    },
    {
      "epoch": 0.29,
      "learning_rate": 3.37125e-05,
      "loss": 2.9347,
      "step": 900
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.7462499999999996e-05,
      "loss": 2.9095,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "eval_loss": 2.9152133464813232,
      "eval_runtime": 718.1623,
      "eval_samples_per_second": 22.197,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.9999871219487068,
      "step": 1000
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.12125e-05,
      "loss": 2.8888,
      "step": 1100
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.4962499999999995e-05,
      "loss": 2.8347,
      "step": 1200
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.871249999999999e-05,
      "loss": 2.5318,
      "step": 1300
    },
    {
      "epoch": 0.45,
      "learning_rate": 5.2462499999999994e-05,
      "loss": 2.0502,
      "step": 1400
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.62125e-05,
      "loss": 1.8434,
      "step": 1500
    },
    {
      "epoch": 0.49,
      "eval_loss": 1.0473320484161377,
      "eval_runtime": 720.1235,
      "eval_samples_per_second": 22.136,
      "eval_steps_per_second": 1.384,
      "eval_wer": 0.7446153648029981,
      "step": 1500
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.9962499999999994e-05,
      "loss": 1.7339,
      "step": 1600
    },
    {
      "epoch": 0.55,
      "learning_rate": 6.367499999999999e-05,
      "loss": 1.6535,
      "step": 1700
    },
    {
      "epoch": 0.58,
      "learning_rate": 6.7425e-05,
      "loss": 1.5793,
      "step": 1800
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.1175e-05,
      "loss": 1.5056,
      "step": 1900
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.492499999999999e-05,
      "loss": 1.4298,
      "step": 2000
    },
    {
      "epoch": 0.65,
      "eval_loss": 0.5728740692138672,
      "eval_runtime": 712.5783,
      "eval_samples_per_second": 22.371,
      "eval_steps_per_second": 1.399,
      "eval_wer": 0.5129521000882147,
      "step": 2000
    },
    {
      "epoch": 0.68,
      "learning_rate": 7.325623202301054e-05,
      "loss": 1.3592,
      "step": 2100
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.145853307766058e-05,
      "loss": 1.2917,
      "step": 2200
    },
    {
      "epoch": 0.75,
      "learning_rate": 6.966083413231063e-05,
      "loss": 1.2536,
      "step": 2300
    },
    {
      "epoch": 0.78,
      "learning_rate": 6.788111217641419e-05,
      "loss": 1.2345,
      "step": 2400
    },
    {
      "epoch": 0.81,
      "learning_rate": 6.608341323106423e-05,
      "loss": 1.1937,
      "step": 2500
    },
    {
      "epoch": 0.81,
      "eval_loss": 0.3795304000377655,
      "eval_runtime": 716.4435,
      "eval_samples_per_second": 22.25,
      "eval_steps_per_second": 1.392,
      "eval_wer": 0.34504806732645216,
      "step": 2500
    },
    {
      "epoch": 0.84,
      "learning_rate": 6.428571428571427e-05,
      "loss": 1.1806,
      "step": 2600
    },
    {
      "epoch": 0.87,
      "learning_rate": 6.248801534036433e-05,
      "loss": 1.1651,
      "step": 2700
    },
    {
      "epoch": 0.91,
      "learning_rate": 6.069031639501438e-05,
      "loss": 1.1455,
      "step": 2800
    },
    {
      "epoch": 0.94,
      "learning_rate": 5.889261744966442e-05,
      "loss": 1.1312,
      "step": 2900
    },
    {
      "epoch": 0.97,
      "learning_rate": 5.709491850431447e-05,
      "loss": 1.1248,
      "step": 3000
    },
    {
      "epoch": 0.97,
      "eval_loss": 0.3320523500442505,
      "eval_runtime": 716.2808,
      "eval_samples_per_second": 22.255,
      "eval_steps_per_second": 1.392,
      "eval_wer": 0.30515830344552264,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "learning_rate": 5.5297219558964525e-05,
      "loss": 1.1017,
      "step": 3100
    },
    {
      "epoch": 1.04,
      "learning_rate": 5.3499520613614567e-05,
      "loss": 1.0978,
      "step": 3200
    },
    {
      "epoch": 1.07,
      "learning_rate": 5.1701821668264615e-05,
      "loss": 1.0954,
      "step": 3300
    },
    {
      "epoch": 1.1,
      "learning_rate": 4.990412272291467e-05,
      "loss": 1.0867,
      "step": 3400
    },
    {
      "epoch": 1.13,
      "learning_rate": 4.812440076701821e-05,
      "loss": 1.0835,
      "step": 3500
    },
    {
      "epoch": 1.13,
      "eval_loss": 0.3037940561771393,
      "eval_runtime": 714.1597,
      "eval_samples_per_second": 22.321,
      "eval_steps_per_second": 1.396,
      "eval_wer": 0.2805032742445413,
      "step": 3500
    },
    {
      "epoch": 1.17,
      "learning_rate": 4.632670182166826e-05,
      "loss": 1.0808,
      "step": 3600
    },
    {
      "epoch": 1.2,
      "learning_rate": 4.4529002876318304e-05,
      "loss": 1.0648,
      "step": 3700
    },
    {
      "epoch": 1.23,
      "learning_rate": 4.273130393096836e-05,
      "loss": 1.0541,
      "step": 3800
    },
    {
      "epoch": 1.26,
      "learning_rate": 4.093360498561841e-05,
      "loss": 1.0621,
      "step": 3900
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.913590604026845e-05,
      "loss": 1.0479,
      "step": 4000
    },
    {
      "epoch": 1.3,
      "eval_loss": 0.2910499572753906,
      "eval_runtime": 718.1665,
      "eval_samples_per_second": 22.197,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.26888727197800427,
      "step": 4000
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.7338207094918506e-05,
      "loss": 1.0428,
      "step": 4100
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.555848513902205e-05,
      "loss": 1.047,
      "step": 4200
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.37607861936721e-05,
      "loss": 1.0397,
      "step": 4300
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.1963087248322145e-05,
      "loss": 1.0347,
      "step": 4400
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.0165388302972194e-05,
      "loss": 1.0413,
      "step": 4500
    },
    {
      "epoch": 1.46,
      "eval_loss": 0.27976545691490173,
      "eval_runtime": 713.7382,
      "eval_samples_per_second": 22.335,
      "eval_steps_per_second": 1.397,
      "eval_wer": 0.2592995627901586,
      "step": 4500
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.836768935762224e-05,
      "loss": 1.0238,
      "step": 4600
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.656999041227229e-05,
      "loss": 1.0269,
      "step": 4700
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.4772291466922337e-05,
      "loss": 1.021,
      "step": 4800
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.2974592521572386e-05,
      "loss": 1.0186,
      "step": 4900
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.1176893576222434e-05,
      "loss": 1.014,
      "step": 5000
    },
    {
      "epoch": 1.62,
      "eval_loss": 0.27265554666519165,
      "eval_runtime": 707.3075,
      "eval_samples_per_second": 22.538,
      "eval_steps_per_second": 1.41,
      "eval_wer": 0.25117351242409997,
      "step": 5000
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.9379194630872483e-05,
      "loss": 1.0074,
      "step": 5100
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.759947267497603e-05,
      "loss": 1.0193,
      "step": 5200
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.5801773729626078e-05,
      "loss": 1.0044,
      "step": 5300
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.4004074784276125e-05,
      "loss": 1.0005,
      "step": 5400
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.2206375838926173e-05,
      "loss": 1.004,
      "step": 5500
    },
    {
      "epoch": 1.78,
      "eval_loss": 0.26460376381874084,
      "eval_runtime": 719.6956,
      "eval_samples_per_second": 22.15,
      "eval_steps_per_second": 1.385,
      "eval_wer": 0.2470782921128375,
      "step": 5500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.0408676893576222e-05,
      "loss": 1.0048,
      "step": 5600
    },
    {
      "epoch": 1.85,
      "learning_rate": 8.610977948226269e-06,
      "loss": 0.9988,
      "step": 5700
    },
    {
      "epoch": 1.88,
      "learning_rate": 6.813279002876318e-06,
      "loss": 0.9919,
      "step": 5800
    },
    {
      "epoch": 1.91,
      "learning_rate": 5.015580057526366e-06,
      "loss": 0.9886,
      "step": 5900
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.217881112176414e-06,
      "loss": 0.9949,
      "step": 6000
    },
    {
      "epoch": 1.94,
      "eval_loss": 0.26187804341316223,
      "eval_runtime": 717.4473,
      "eval_samples_per_second": 22.219,
      "eval_steps_per_second": 1.39,
      "eval_wer": 0.24574541380398318,
      "step": 6000
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.4201821668264622e-06,
      "loss": 0.9931,
      "step": 6100
    },
    {
      "epoch": 2.0,
      "step": 6172,
      "total_flos": 1.1573983785360925e+20,
      "train_loss": 1.788894365302016,
      "train_runtime": 52105.5599,
      "train_samples_per_second": 15.163,
      "train_steps_per_second": 0.118
    }
  ],
  "max_steps": 6172,
  "num_train_epochs": 2,
  "total_flos": 1.1573983785360925e+20,
  "trial_name": null,
  "trial_params": null
}