{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 200.0,
  "global_step": 6400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.12,
      "learning_rate": 1.3439999999999998e-05,
      "loss": 22.3442,
      "step": 100
    },
    {
      "epoch": 6.25,
      "learning_rate": 2.7439999999999998e-05,
      "loss": 5.9373,
      "step": 200
    },
    {
      "epoch": 9.38,
      "learning_rate": 4.1439999999999996e-05,
      "loss": 3.7179,
      "step": 300
    },
    {
      "epoch": 12.5,
      "learning_rate": 5.544e-05,
      "loss": 3.0789,
      "step": 400
    },
    {
      "epoch": 15.62,
      "learning_rate": 6.944e-05,
      "loss": 2.9287,
      "step": 500
    },
    {
      "epoch": 15.62,
      "eval_loss": 2.877366304397583,
      "eval_runtime": 13.8367,
      "eval_samples_per_second": 27.102,
      "eval_steps_per_second": 0.867,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 18.75,
      "learning_rate": 6.886101694915253e-05,
      "loss": 2.8183,
      "step": 600
    },
    {
      "epoch": 21.88,
      "learning_rate": 6.767457627118643e-05,
      "loss": 2.4012,
      "step": 700
    },
    {
      "epoch": 25.0,
      "learning_rate": 6.648813559322033e-05,
      "loss": 1.6359,
      "step": 800
    },
    {
      "epoch": 28.12,
      "learning_rate": 6.530169491525422e-05,
      "loss": 1.2603,
      "step": 900
    },
    {
      "epoch": 31.25,
      "learning_rate": 6.411525423728813e-05,
      "loss": 1.1182,
      "step": 1000
    },
    {
      "epoch": 31.25,
      "eval_loss": 0.6247776746749878,
      "eval_runtime": 13.8909,
      "eval_samples_per_second": 26.996,
      "eval_steps_per_second": 0.864,
      "eval_wer": 0.713119216113228,
      "step": 1000
    },
    {
      "epoch": 34.38,
      "learning_rate": 6.292881355932203e-05,
      "loss": 1.0279,
      "step": 1100
    },
    {
      "epoch": 37.5,
      "learning_rate": 6.174237288135593e-05,
      "loss": 0.9694,
      "step": 1200
    },
    {
      "epoch": 40.62,
      "learning_rate": 6.0555932203389824e-05,
      "loss": 0.8993,
      "step": 1300
    },
    {
      "epoch": 43.75,
      "learning_rate": 5.936949152542372e-05,
      "loss": 0.8663,
      "step": 1400
    },
    {
      "epoch": 46.88,
      "learning_rate": 5.818305084745762e-05,
      "loss": 0.8329,
      "step": 1500
    },
    {
      "epoch": 46.88,
      "eval_loss": 0.5573042631149292,
      "eval_runtime": 13.9992,
      "eval_samples_per_second": 26.787,
      "eval_steps_per_second": 0.857,
      "eval_wer": 0.5792052259118128,
      "step": 1500
    },
    {
      "epoch": 50.0,
      "learning_rate": 5.6996610169491526e-05,
      "loss": 0.8219,
      "step": 1600
    },
    {
      "epoch": 53.12,
      "learning_rate": 5.581016949152542e-05,
      "loss": 0.8078,
      "step": 1700
    },
    {
      "epoch": 56.25,
      "learning_rate": 5.4623728813559317e-05,
      "loss": 0.7718,
      "step": 1800
    },
    {
      "epoch": 59.38,
      "learning_rate": 5.3437288135593215e-05,
      "loss": 0.7365,
      "step": 1900
    },
    {
      "epoch": 62.5,
      "learning_rate": 5.2250847457627114e-05,
      "loss": 0.7109,
      "step": 2000
    },
    {
      "epoch": 62.5,
      "eval_loss": 0.54203200340271,
      "eval_runtime": 13.6769,
      "eval_samples_per_second": 27.419,
      "eval_steps_per_second": 0.877,
      "eval_wer": 0.568317909635275,
      "step": 2000
    },
    {
      "epoch": 65.62,
      "learning_rate": 5.106440677966102e-05,
      "loss": 0.691,
      "step": 2100
    },
    {
      "epoch": 68.75,
      "learning_rate": 4.987796610169491e-05,
      "loss": 0.69,
      "step": 2200
    },
    {
      "epoch": 71.88,
      "learning_rate": 4.870338983050847e-05,
      "loss": 0.6662,
      "step": 2300
    },
    {
      "epoch": 75.0,
      "learning_rate": 4.7528813559322025e-05,
      "loss": 0.6387,
      "step": 2400
    },
    {
      "epoch": 78.12,
      "learning_rate": 4.634237288135593e-05,
      "loss": 0.6295,
      "step": 2500
    },
    {
      "epoch": 78.12,
      "eval_loss": 0.5166199803352356,
      "eval_runtime": 13.9794,
      "eval_samples_per_second": 26.825,
      "eval_steps_per_second": 0.858,
      "eval_wer": 0.5394665215024497,
      "step": 2500
    },
    {
      "epoch": 81.25,
      "learning_rate": 4.515593220338983e-05,
      "loss": 0.6157,
      "step": 2600
    },
    {
      "epoch": 84.38,
      "learning_rate": 4.396949152542372e-05,
      "loss": 0.596,
      "step": 2700
    },
    {
      "epoch": 87.5,
      "learning_rate": 4.2783050847457626e-05,
      "loss": 0.5883,
      "step": 2800
    },
    {
      "epoch": 90.62,
      "learning_rate": 4.159661016949152e-05,
      "loss": 0.5843,
      "step": 2900
    },
    {
      "epoch": 93.75,
      "learning_rate": 4.041016949152542e-05,
      "loss": 0.5715,
      "step": 3000
    },
    {
      "epoch": 93.75,
      "eval_loss": 0.5486962795257568,
      "eval_runtime": 13.6771,
      "eval_samples_per_second": 27.418,
      "eval_steps_per_second": 0.877,
      "eval_wer": 0.562874251497006,
      "step": 3000
    },
    {
      "epoch": 96.88,
      "learning_rate": 3.922372881355932e-05,
      "loss": 0.5581,
      "step": 3100
    },
    {
      "epoch": 100.0,
      "learning_rate": 3.8037288135593213e-05,
      "loss": 0.5262,
      "step": 3200
    },
    {
      "epoch": 103.12,
      "learning_rate": 3.685084745762712e-05,
      "loss": 0.5337,
      "step": 3300
    },
    {
      "epoch": 106.25,
      "learning_rate": 3.566440677966101e-05,
      "loss": 0.5229,
      "step": 3400
    },
    {
      "epoch": 109.38,
      "learning_rate": 3.4477966101694916e-05,
      "loss": 0.5016,
      "step": 3500
    },
    {
      "epoch": 109.38,
      "eval_loss": 0.5369759202003479,
      "eval_runtime": 14.0144,
      "eval_samples_per_second": 26.758,
      "eval_steps_per_second": 0.856,
      "eval_wer": 0.5470876428960262,
      "step": 3500
    },
    {
      "epoch": 112.5,
      "learning_rate": 3.329152542372881e-05,
      "loss": 0.4817,
      "step": 3600
    },
    {
      "epoch": 115.62,
      "learning_rate": 3.2105084745762706e-05,
      "loss": 0.4959,
      "step": 3700
    },
    {
      "epoch": 118.75,
      "learning_rate": 3.0918644067796605e-05,
      "loss": 0.4786,
      "step": 3800
    },
    {
      "epoch": 121.88,
      "learning_rate": 2.9732203389830507e-05,
      "loss": 0.4671,
      "step": 3900
    },
    {
      "epoch": 125.0,
      "learning_rate": 2.8545762711864405e-05,
      "loss": 0.4661,
      "step": 4000
    },
    {
      "epoch": 125.0,
      "eval_loss": 0.5621253252029419,
      "eval_runtime": 13.7208,
      "eval_samples_per_second": 27.331,
      "eval_steps_per_second": 0.875,
      "eval_wer": 0.5394665215024497,
      "step": 4000
    },
    {
      "epoch": 128.12,
      "learning_rate": 2.7359322033898304e-05,
      "loss": 0.4538,
      "step": 4100
    },
    {
      "epoch": 131.25,
      "learning_rate": 2.61728813559322e-05,
      "loss": 0.4482,
      "step": 4200
    },
    {
      "epoch": 134.38,
      "learning_rate": 2.4986440677966097e-05,
      "loss": 0.4518,
      "step": 4300
    },
    {
      "epoch": 137.5,
      "learning_rate": 2.38e-05,
      "loss": 0.4371,
      "step": 4400
    },
    {
      "epoch": 140.62,
      "learning_rate": 2.2613559322033898e-05,
      "loss": 0.423,
      "step": 4500
    },
    {
      "epoch": 140.62,
      "eval_loss": 0.5657923221588135,
      "eval_runtime": 13.7521,
      "eval_samples_per_second": 27.269,
      "eval_steps_per_second": 0.873,
      "eval_wer": 0.5247686445291235,
      "step": 4500
    },
    {
      "epoch": 143.75,
      "learning_rate": 2.1427118644067796e-05,
      "loss": 0.4134,
      "step": 4600
    },
    {
      "epoch": 146.88,
      "learning_rate": 2.024067796610169e-05,
      "loss": 0.4014,
      "step": 4700
    },
    {
      "epoch": 150.0,
      "learning_rate": 1.905423728813559e-05,
      "loss": 0.4001,
      "step": 4800
    },
    {
      "epoch": 153.12,
      "learning_rate": 1.786779661016949e-05,
      "loss": 0.3777,
      "step": 4900
    },
    {
      "epoch": 156.25,
      "learning_rate": 1.6681355932203387e-05,
      "loss": 0.3793,
      "step": 5000
    },
    {
      "epoch": 156.25,
      "eval_loss": 0.5920763611793518,
      "eval_runtime": 13.612,
      "eval_samples_per_second": 27.549,
      "eval_steps_per_second": 0.882,
      "eval_wer": 0.4980947196516059,
      "step": 5000
    },
    {
      "epoch": 159.38,
      "learning_rate": 1.549491525423729e-05,
      "loss": 0.3843,
      "step": 5100
    },
    {
      "epoch": 162.5,
      "learning_rate": 1.4308474576271186e-05,
      "loss": 0.3918,
      "step": 5200
    },
    {
      "epoch": 165.62,
      "learning_rate": 1.3122033898305082e-05,
      "loss": 0.3593,
      "step": 5300
    },
    {
      "epoch": 168.75,
      "learning_rate": 1.1935593220338983e-05,
      "loss": 0.3674,
      "step": 5400
    },
    {
      "epoch": 171.88,
      "learning_rate": 1.074915254237288e-05,
      "loss": 0.3651,
      "step": 5500
    },
    {
      "epoch": 171.88,
      "eval_loss": 0.5986579060554504,
      "eval_runtime": 13.8361,
      "eval_samples_per_second": 27.103,
      "eval_steps_per_second": 0.867,
      "eval_wer": 0.48884050081654873,
      "step": 5500
    },
    {
      "epoch": 175.0,
      "learning_rate": 9.56271186440678e-06,
      "loss": 0.345,
      "step": 5600
    },
    {
      "epoch": 178.12,
      "learning_rate": 8.376271186440677e-06,
      "loss": 0.3527,
      "step": 5700
    },
    {
      "epoch": 181.25,
      "learning_rate": 7.189830508474576e-06,
      "loss": 0.3446,
      "step": 5800
    },
    {
      "epoch": 184.38,
      "learning_rate": 6.003389830508474e-06,
      "loss": 0.3461,
      "step": 5900
    },
    {
      "epoch": 187.5,
      "learning_rate": 4.816949152542372e-06,
      "loss": 0.3351,
      "step": 6000
    },
    {
      "epoch": 187.5,
      "eval_loss": 0.6017099022865295,
      "eval_runtime": 13.6253,
      "eval_samples_per_second": 27.522,
      "eval_steps_per_second": 0.881,
      "eval_wer": 0.49482852476864453,
      "step": 6000
    },
    {
      "epoch": 190.62,
      "learning_rate": 3.630508474576271e-06,
      "loss": 0.3368,
      "step": 6100
    },
    {
      "epoch": 193.75,
      "learning_rate": 2.444067796610169e-06,
      "loss": 0.3405,
      "step": 6200
    },
    {
      "epoch": 196.88,
      "learning_rate": 1.2576271186440677e-06,
      "loss": 0.3271,
      "step": 6300
    },
    {
      "epoch": 200.0,
      "learning_rate": 7.118644067796609e-08,
      "loss": 0.334,
      "step": 6400
    },
    {
      "epoch": 200.0,
      "step": 6400,
      "total_flos": 2.9581900723233317e+19,
      "train_loss": 1.1891312944889068,
      "train_runtime": 10628.5861,
      "train_samples_per_second": 18.968,
      "train_steps_per_second": 0.602
    }
  ],
  "max_steps": 6400,
  "num_train_epochs": 200,
  "total_flos": 2.9581900723233317e+19,
  "trial_name": null,
  "trial_params": null
}