{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 9650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.52,
      "learning_rate": 3.7125e-06,
      "loss": 19.477,
      "step": 100
    },
    {
      "epoch": 1.04,
      "learning_rate": 7.4625e-06,
      "loss": 11.5043,
      "step": 200
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.1212499999999998e-05,
      "loss": 7.4168,
      "step": 300
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.49625e-05,
      "loss": 6.086,
      "step": 400
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.8712499999999997e-05,
      "loss": 5.0417,
      "step": 500
    },
    {
      "epoch": 2.59,
      "eval_loss": 5.148427963256836,
      "eval_runtime": 121.8982,
      "eval_samples_per_second": 22.092,
      "eval_steps_per_second": 2.765,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 3.11,
      "learning_rate": 2.2462499999999997e-05,
      "loss": 4.2126,
      "step": 600
    },
    {
      "epoch": 3.63,
      "learning_rate": 2.6212499999999997e-05,
      "loss": 3.6981,
      "step": 700
    },
    {
      "epoch": 4.15,
      "learning_rate": 2.99625e-05,
      "loss": 3.496,
      "step": 800
    },
    {
      "epoch": 4.66,
      "learning_rate": 3.37125e-05,
      "loss": 3.4391,
      "step": 900
    },
    {
      "epoch": 5.18,
      "learning_rate": 3.7462499999999996e-05,
      "loss": 3.3722,
      "step": 1000
    },
    {
      "epoch": 5.18,
      "eval_loss": 3.3380324840545654,
      "eval_runtime": 122.9217,
      "eval_samples_per_second": 21.908,
      "eval_steps_per_second": 2.742,
      "eval_wer": 1.0001231729348004,
      "step": 1000
    },
    {
      "epoch": 5.7,
      "learning_rate": 4.12125e-05,
      "loss": 3.2941,
      "step": 1100
    },
    {
      "epoch": 6.22,
      "learning_rate": 4.4962499999999995e-05,
      "loss": 3.2158,
      "step": 1200
    },
    {
      "epoch": 6.74,
      "learning_rate": 4.871249999999999e-05,
      "loss": 2.7897,
      "step": 1300
    },
    {
      "epoch": 7.25,
      "learning_rate": 5.2462499999999994e-05,
      "loss": 2.2331,
      "step": 1400
    },
    {
      "epoch": 7.77,
      "learning_rate": 5.62125e-05,
      "loss": 1.9752,
      "step": 1500
    },
    {
      "epoch": 7.77,
      "eval_loss": 1.3909666538238525,
      "eval_runtime": 123.3452,
      "eval_samples_per_second": 21.833,
      "eval_steps_per_second": 2.732,
      "eval_wer": 1.007431433732961,
      "step": 1500
    },
    {
      "epoch": 8.29,
      "learning_rate": 5.9962499999999994e-05,
      "loss": 1.8341,
      "step": 1600
    },
    {
      "epoch": 8.81,
      "learning_rate": 6.37125e-05,
      "loss": 1.7481,
      "step": 1700
    },
    {
      "epoch": 9.33,
      "learning_rate": 6.746249999999999e-05,
      "loss": 1.6736,
      "step": 1800
    },
    {
      "epoch": 9.84,
      "learning_rate": 7.121249999999999e-05,
      "loss": 1.6481,
      "step": 1900
    },
    {
      "epoch": 10.36,
      "learning_rate": 7.492499999999999e-05,
      "loss": 1.5868,
      "step": 2000
    },
    {
      "epoch": 10.36,
      "eval_loss": 1.029773235321045,
      "eval_runtime": 125.3959,
      "eval_samples_per_second": 21.476,
      "eval_steps_per_second": 2.687,
      "eval_wer": 1.0084168172113648,
      "step": 2000
    },
    {
      "epoch": 10.88,
      "learning_rate": 7.404901960784314e-05,
      "loss": 1.5598,
      "step": 2100
    },
    {
      "epoch": 11.4,
      "learning_rate": 7.306862745098039e-05,
      "loss": 1.5205,
      "step": 2200
    },
    {
      "epoch": 11.92,
      "learning_rate": 7.208823529411764e-05,
      "loss": 1.4995,
      "step": 2300
    },
    {
      "epoch": 12.44,
      "learning_rate": 7.11078431372549e-05,
      "loss": 1.451,
      "step": 2400
    },
    {
      "epoch": 12.95,
      "learning_rate": 7.012745098039215e-05,
      "loss": 1.4413,
      "step": 2500
    },
    {
      "epoch": 12.95,
      "eval_loss": 0.9313108921051025,
      "eval_runtime": 124.7489,
      "eval_samples_per_second": 21.587,
      "eval_steps_per_second": 2.701,
      "eval_wer": 1.0174905567416652,
      "step": 2500
    },
    {
      "epoch": 13.47,
      "learning_rate": 6.91470588235294e-05,
      "loss": 1.3987,
      "step": 2600
    },
    {
      "epoch": 13.99,
      "learning_rate": 6.816666666666665e-05,
      "loss": 1.3931,
      "step": 2700
    },
    {
      "epoch": 14.51,
      "learning_rate": 6.718627450980392e-05,
      "loss": 1.3632,
      "step": 2800
    },
    {
      "epoch": 15.03,
      "learning_rate": 6.620588235294117e-05,
      "loss": 1.3609,
      "step": 2900
    },
    {
      "epoch": 15.54,
      "learning_rate": 6.522549019607843e-05,
      "loss": 1.3296,
      "step": 3000
    },
    {
      "epoch": 15.54,
      "eval_loss": 0.8966061472892761,
      "eval_runtime": 122.7764,
      "eval_samples_per_second": 21.934,
      "eval_steps_per_second": 2.745,
      "eval_wer": 1.0193792084086057,
      "step": 3000
    },
    {
      "epoch": 16.06,
      "learning_rate": 6.424509803921568e-05,
      "loss": 1.3424,
      "step": 3100
    },
    {
      "epoch": 16.58,
      "learning_rate": 6.326470588235293e-05,
      "loss": 1.3207,
      "step": 3200
    },
    {
      "epoch": 17.1,
      "learning_rate": 6.229411764705881e-05,
      "loss": 1.2947,
      "step": 3300
    },
    {
      "epoch": 17.62,
      "learning_rate": 6.131372549019608e-05,
      "loss": 1.2949,
      "step": 3400
    },
    {
      "epoch": 18.13,
      "learning_rate": 6.033333333333333e-05,
      "loss": 1.2746,
      "step": 3500
    },
    {
      "epoch": 18.13,
      "eval_loss": 0.8875203728675842,
      "eval_runtime": 121.1222,
      "eval_samples_per_second": 22.234,
      "eval_steps_per_second": 2.782,
      "eval_wer": 1.0097306618492363,
      "step": 3500
    },
    {
      "epoch": 18.65,
      "learning_rate": 5.9352941176470584e-05,
      "loss": 1.2649,
      "step": 3600
    },
    {
      "epoch": 19.17,
      "learning_rate": 5.837254901960784e-05,
      "loss": 1.2446,
      "step": 3700
    },
    {
      "epoch": 19.69,
      "learning_rate": 5.739215686274509e-05,
      "loss": 1.2393,
      "step": 3800
    },
    {
      "epoch": 20.21,
      "learning_rate": 5.641176470588235e-05,
      "loss": 1.2187,
      "step": 3900
    },
    {
      "epoch": 20.73,
      "learning_rate": 5.54313725490196e-05,
      "loss": 1.2147,
      "step": 4000
    },
    {
      "epoch": 20.73,
      "eval_loss": 0.8745562434196472,
      "eval_runtime": 124.1654,
      "eval_samples_per_second": 21.689,
      "eval_steps_per_second": 2.714,
      "eval_wer": 1.0089095089505666,
      "step": 4000
    },
    {
      "epoch": 21.24,
      "learning_rate": 5.445098039215686e-05,
      "loss": 1.2066,
      "step": 4100
    },
    {
      "epoch": 21.76,
      "learning_rate": 5.347058823529411e-05,
      "loss": 1.1995,
      "step": 4200
    },
    {
      "epoch": 22.28,
      "learning_rate": 5.2490196078431365e-05,
      "loss": 1.1985,
      "step": 4300
    },
    {
      "epoch": 22.8,
      "learning_rate": 5.150980392156863e-05,
      "loss": 1.1856,
      "step": 4400
    },
    {
      "epoch": 23.32,
      "learning_rate": 5.052941176470588e-05,
      "loss": 1.1774,
      "step": 4500
    },
    {
      "epoch": 23.32,
      "eval_loss": 0.8383361101150513,
      "eval_runtime": 126.9391,
      "eval_samples_per_second": 21.215,
      "eval_steps_per_second": 2.655,
      "eval_wer": 1.019830842502874,
      "step": 4500
    },
    {
      "epoch": 23.83,
      "learning_rate": 4.9549019607843137e-05,
      "loss": 1.1655,
      "step": 4600
    },
    {
      "epoch": 24.35,
      "learning_rate": 4.856862745098039e-05,
      "loss": 1.1548,
      "step": 4700
    },
    {
      "epoch": 24.87,
      "learning_rate": 4.7588235294117644e-05,
      "loss": 1.1539,
      "step": 4800
    },
    {
      "epoch": 25.39,
      "learning_rate": 4.6607843137254895e-05,
      "loss": 1.1403,
      "step": 4900
    },
    {
      "epoch": 25.91,
      "learning_rate": 4.562745098039215e-05,
      "loss": 1.129,
      "step": 5000
    },
    {
      "epoch": 25.91,
      "eval_loss": 0.7847502827644348,
      "eval_runtime": 122.1221,
      "eval_samples_per_second": 22.052,
      "eval_steps_per_second": 2.76,
      "eval_wer": 1.016710461487929,
      "step": 5000
    },
    {
      "epoch": 26.42,
      "learning_rate": 4.464705882352941e-05,
      "loss": 1.1236,
      "step": 5100
    },
    {
      "epoch": 26.94,
      "learning_rate": 4.366666666666666e-05,
      "loss": 1.1171,
      "step": 5200
    },
    {
      "epoch": 27.46,
      "learning_rate": 4.268627450980392e-05,
      "loss": 1.1144,
      "step": 5300
    },
    {
      "epoch": 27.98,
      "learning_rate": 4.170588235294117e-05,
      "loss": 1.0954,
      "step": 5400
    },
    {
      "epoch": 28.5,
      "learning_rate": 4.0725490196078425e-05,
      "loss": 1.0995,
      "step": 5500
    },
    {
      "epoch": 28.5,
      "eval_loss": 0.7992498874664307,
      "eval_runtime": 122.009,
      "eval_samples_per_second": 22.072,
      "eval_steps_per_second": 2.762,
      "eval_wer": 1.0209804565610117,
      "step": 5500
    },
    {
      "epoch": 29.02,
      "learning_rate": 3.974509803921569e-05,
      "loss": 1.1048,
      "step": 5600
    },
    {
      "epoch": 29.53,
      "learning_rate": 3.876470588235294e-05,
      "loss": 1.0818,
      "step": 5700
    },
    {
      "epoch": 30.05,
      "learning_rate": 3.7784313725490196e-05,
      "loss": 1.0747,
      "step": 5800
    },
    {
      "epoch": 30.57,
      "learning_rate": 3.680392156862745e-05,
      "loss": 1.0711,
      "step": 5900
    },
    {
      "epoch": 31.09,
      "learning_rate": 3.5823529411764704e-05,
      "loss": 1.0665,
      "step": 6000
    },
    {
      "epoch": 31.09,
      "eval_loss": 0.7878175973892212,
      "eval_runtime": 122.0835,
      "eval_samples_per_second": 22.059,
      "eval_steps_per_second": 2.76,
      "eval_wer": 1.01071604532764,
      "step": 6000
    },
    {
      "epoch": 31.61,
      "learning_rate": 3.484313725490196e-05,
      "loss": 1.0736,
      "step": 6100
    },
    {
      "epoch": 32.12,
      "learning_rate": 3.386274509803921e-05,
      "loss": 1.0588,
      "step": 6200
    },
    {
      "epoch": 32.64,
      "learning_rate": 3.288235294117647e-05,
      "loss": 1.0569,
      "step": 6300
    },
    {
      "epoch": 33.16,
      "learning_rate": 3.190196078431372e-05,
      "loss": 1.0361,
      "step": 6400
    },
    {
      "epoch": 33.68,
      "learning_rate": 3.092156862745098e-05,
      "loss": 1.0321,
      "step": 6500
    },
    {
      "epoch": 33.68,
      "eval_loss": 0.7653242945671082,
      "eval_runtime": 125.1204,
      "eval_samples_per_second": 21.523,
      "eval_steps_per_second": 2.693,
      "eval_wer": 1.0081704713417639,
      "step": 6500
    },
    {
      "epoch": 34.2,
      "learning_rate": 2.9941176470588234e-05,
      "loss": 1.0294,
      "step": 6600
    },
    {
      "epoch": 34.72,
      "learning_rate": 2.8960784313725488e-05,
      "loss": 1.0174,
      "step": 6700
    },
    {
      "epoch": 35.23,
      "learning_rate": 2.7980392156862742e-05,
      "loss": 1.0086,
      "step": 6800
    },
    {
      "epoch": 35.75,
      "learning_rate": 2.6999999999999996e-05,
      "loss": 1.0138,
      "step": 6900
    },
    {
      "epoch": 36.27,
      "learning_rate": 2.601960784313725e-05,
      "loss": 1.0068,
      "step": 7000
    },
    {
      "epoch": 36.27,
      "eval_loss": 0.7634544968605042,
      "eval_runtime": 131.459,
      "eval_samples_per_second": 20.485,
      "eval_steps_per_second": 2.564,
      "eval_wer": 1.0064871078994908,
      "step": 7000
    },
    {
      "epoch": 36.79,
      "learning_rate": 2.503921568627451e-05,
      "loss": 1.0158,
      "step": 7100
    },
    {
      "epoch": 37.31,
      "learning_rate": 2.406862745098039e-05,
      "loss": 1.0167,
      "step": 7200
    },
    {
      "epoch": 37.82,
      "learning_rate": 2.3088235294117645e-05,
      "loss": 1.0112,
      "step": 7300
    },
    {
      "epoch": 38.34,
      "learning_rate": 2.21078431372549e-05,
      "loss": 0.999,
      "step": 7400
    },
    {
      "epoch": 38.86,
      "learning_rate": 2.1127450980392156e-05,
      "loss": 0.9916,
      "step": 7500
    },
    {
      "epoch": 38.86,
      "eval_loss": 0.7728472352027893,
      "eval_runtime": 123.666,
      "eval_samples_per_second": 21.776,
      "eval_steps_per_second": 2.725,
      "eval_wer": 1.009032681885367,
      "step": 7500
    },
    {
      "epoch": 39.38,
      "learning_rate": 2.014705882352941e-05,
      "loss": 0.9859,
      "step": 7600
    },
    {
      "epoch": 39.9,
      "learning_rate": 1.9166666666666664e-05,
      "loss": 0.9878,
      "step": 7700
    },
    {
      "epoch": 40.41,
      "learning_rate": 1.818627450980392e-05,
      "loss": 0.9844,
      "step": 7800
    },
    {
      "epoch": 40.93,
      "learning_rate": 1.7205882352941175e-05,
      "loss": 0.9794,
      "step": 7900
    },
    {
      "epoch": 41.45,
      "learning_rate": 1.6225490196078432e-05,
      "loss": 0.9735,
      "step": 8000
    },
    {
      "epoch": 41.45,
      "eval_loss": 0.7687555551528931,
      "eval_runtime": 126.9882,
      "eval_samples_per_second": 21.207,
      "eval_steps_per_second": 2.654,
      "eval_wer": 1.0070208572836261,
      "step": 8000
    },
    {
      "epoch": 41.97,
      "learning_rate": 1.5245098039215684e-05,
      "loss": 0.9671,
      "step": 8100
    },
    {
      "epoch": 42.49,
      "learning_rate": 1.426470588235294e-05,
      "loss": 0.9732,
      "step": 8200
    },
    {
      "epoch": 43.01,
      "learning_rate": 1.3284313725490195e-05,
      "loss": 0.9689,
      "step": 8300
    },
    {
      "epoch": 43.52,
      "learning_rate": 1.230392156862745e-05,
      "loss": 0.9695,
      "step": 8400
    },
    {
      "epoch": 44.04,
      "learning_rate": 1.1323529411764705e-05,
      "loss": 0.9745,
      "step": 8500
    },
    {
      "epoch": 44.04,
      "eval_loss": 0.7455455660820007,
      "eval_runtime": 121.9802,
      "eval_samples_per_second": 22.077,
      "eval_steps_per_second": 2.763,
      "eval_wer": 1.0097306618492363,
      "step": 8500
    },
    {
      "epoch": 44.56,
      "learning_rate": 1.034313725490196e-05,
      "loss": 0.9536,
      "step": 8600
    },
    {
      "epoch": 45.08,
      "learning_rate": 9.362745098039214e-06,
      "loss": 0.9507,
      "step": 8700
    },
    {
      "epoch": 45.6,
      "learning_rate": 8.38235294117647e-06,
      "loss": 0.9537,
      "step": 8800
    },
    {
      "epoch": 46.11,
      "learning_rate": 7.4019607843137246e-06,
      "loss": 0.943,
      "step": 8900
    },
    {
      "epoch": 46.63,
      "learning_rate": 6.42156862745098e-06,
      "loss": 0.9677,
      "step": 9000
    },
    {
      "epoch": 46.63,
      "eval_loss": 0.7604856491088867,
      "eval_runtime": 125.3047,
      "eval_samples_per_second": 21.492,
      "eval_steps_per_second": 2.689,
      "eval_wer": 1.0098948924289703,
      "step": 9000
    },
    {
      "epoch": 47.15,
      "learning_rate": 5.441176470588236e-06,
      "loss": 0.9385,
      "step": 9100
    },
    {
      "epoch": 47.67,
      "learning_rate": 4.4607843137254895e-06,
      "loss": 0.9402,
      "step": 9200
    },
    {
      "epoch": 48.19,
      "learning_rate": 3.480392156862745e-06,
      "loss": 0.9361,
      "step": 9300
    },
    {
      "epoch": 48.7,
      "learning_rate": 2.4999999999999998e-06,
      "loss": 0.9271,
      "step": 9400
    },
    {
      "epoch": 49.22,
      "learning_rate": 1.519607843137255e-06,
      "loss": 0.9313,
      "step": 9500
    },
    {
      "epoch": 49.22,
      "eval_loss": 0.7526801824569702,
      "eval_runtime": 121.7181,
      "eval_samples_per_second": 22.125,
      "eval_steps_per_second": 2.769,
      "eval_wer": 1.0096896042043029,
      "step": 9500
    },
    {
      "epoch": 49.74,
      "learning_rate": 5.392156862745097e-07,
      "loss": 0.9178,
      "step": 9600
    },
    {
      "epoch": 50.0,
      "step": 9650,
      "total_flos": 4.012526296607357e+19,
      "train_loss": 1.8175229964972777,
      "train_runtime": 20359.4006,
      "train_samples_per_second": 15.167,
      "train_steps_per_second": 0.474
    }
  ],
  "max_steps": 9650,
  "num_train_epochs": 50,
  "total_flos": 4.012526296607357e+19,
  "trial_name": null,
  "trial_params": null
}