{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 93.74805194805195,
  "global_step": 9000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.04,
      "learning_rate": 3.828125e-06,
      "loss": 15.104,
      "step": 100
    },
    {
      "epoch": 2.08,
      "learning_rate": 7.6953125e-06,
      "loss": 12.2149,
      "step": 200
    },
    {
      "epoch": 3.12,
      "learning_rate": 1.16015625e-05,
      "loss": 4.9885,
      "step": 300
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.55078125e-05,
      "loss": 3.3788,
      "step": 400
    },
    {
      "epoch": 5.21,
      "learning_rate": 1.94140625e-05,
      "loss": 3.1705,
      "step": 500
    },
    {
      "epoch": 6.25,
      "learning_rate": 2.3320312499999995e-05,
      "loss": 3.11,
      "step": 600
    },
    {
      "epoch": 7.29,
      "learning_rate": 2.72265625e-05,
      "loss": 3.0526,
      "step": 700
    },
    {
      "epoch": 8.33,
      "learning_rate": 3.11328125e-05,
      "loss": 3.0113,
      "step": 800
    },
    {
      "epoch": 9.37,
      "learning_rate": 3.5039062499999995e-05,
      "loss": 2.9601,
      "step": 900
    },
    {
      "epoch": 10.42,
      "learning_rate": 3.89453125e-05,
      "loss": 2.9099,
      "step": 1000
    },
    {
      "epoch": 10.42,
      "eval_loss": 2.8369038105010986,
      "eval_runtime": 198.5866,
      "eval_samples_per_second": 25.405,
      "eval_steps_per_second": 0.796,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 11.46,
      "learning_rate": 4.28515625e-05,
      "loss": 2.6653,
      "step": 1100
    },
    {
      "epoch": 12.5,
      "learning_rate": 4.675781249999999e-05,
      "loss": 1.8958,
      "step": 1200
    },
    {
      "epoch": 13.54,
      "learning_rate": 5.0664062499999996e-05,
      "loss": 1.4812,
      "step": 1300
    },
    {
      "epoch": 14.58,
      "learning_rate": 5.45703125e-05,
      "loss": 1.3358,
      "step": 1400
    },
    {
      "epoch": 15.62,
      "learning_rate": 5.8476562499999996e-05,
      "loss": 1.2522,
      "step": 1500
    },
    {
      "epoch": 16.66,
      "learning_rate": 6.238281249999999e-05,
      "loss": 1.1919,
      "step": 1600
    },
    {
      "epoch": 17.71,
      "learning_rate": 6.62890625e-05,
      "loss": 1.1663,
      "step": 1700
    },
    {
      "epoch": 18.75,
      "learning_rate": 7.019531249999999e-05,
      "loss": 1.1289,
      "step": 1800
    },
    {
      "epoch": 19.79,
      "learning_rate": 7.410156249999999e-05,
      "loss": 1.0902,
      "step": 1900
    },
    {
      "epoch": 20.83,
      "learning_rate": 7.4248046875e-05,
      "loss": 1.0745,
      "step": 2000
    },
    {
      "epoch": 20.83,
      "eval_loss": 0.19572903215885162,
      "eval_runtime": 202.06,
      "eval_samples_per_second": 24.968,
      "eval_steps_per_second": 0.782,
      "eval_wer": 0.16725371193237237,
      "step": 2000
    },
    {
      "epoch": 21.87,
      "learning_rate": 7.327148437499999e-05,
      "loss": 1.0485,
      "step": 2100
    },
    {
      "epoch": 22.91,
      "learning_rate": 7.2294921875e-05,
      "loss": 1.0291,
      "step": 2200
    },
    {
      "epoch": 23.96,
      "learning_rate": 7.1318359375e-05,
      "loss": 1.007,
      "step": 2300
    },
    {
      "epoch": 25.0,
      "learning_rate": 7.0341796875e-05,
      "loss": 1.0008,
      "step": 2400
    },
    {
      "epoch": 26.04,
      "learning_rate": 6.9365234375e-05,
      "loss": 0.988,
      "step": 2500
    },
    {
      "epoch": 27.08,
      "learning_rate": 6.8388671875e-05,
      "loss": 0.9766,
      "step": 2600
    },
    {
      "epoch": 28.12,
      "learning_rate": 6.7412109375e-05,
      "loss": 0.9663,
      "step": 2700
    },
    {
      "epoch": 29.17,
      "learning_rate": 6.6435546875e-05,
      "loss": 0.9539,
      "step": 2800
    },
    {
      "epoch": 30.21,
      "learning_rate": 6.545898437499999e-05,
      "loss": 0.9479,
      "step": 2900
    },
    {
      "epoch": 31.25,
      "learning_rate": 6.448242187499999e-05,
      "loss": 0.934,
      "step": 3000
    },
    {
      "epoch": 31.25,
      "eval_loss": 0.1579357087612152,
      "eval_runtime": 198.7733,
      "eval_samples_per_second": 25.381,
      "eval_steps_per_second": 0.795,
      "eval_wer": 0.1388858784003468,
      "step": 3000
    },
    {
      "epoch": 32.29,
      "learning_rate": 6.3505859375e-05,
      "loss": 0.9285,
      "step": 3100
    },
    {
      "epoch": 33.33,
      "learning_rate": 6.252929687499999e-05,
      "loss": 0.9121,
      "step": 3200
    },
    {
      "epoch": 34.37,
      "learning_rate": 6.155273437499999e-05,
      "loss": 0.9016,
      "step": 3300
    },
    {
      "epoch": 35.42,
      "learning_rate": 6.0576171875e-05,
      "loss": 0.9023,
      "step": 3400
    },
    {
      "epoch": 36.46,
      "learning_rate": 5.9599609374999994e-05,
      "loss": 0.9004,
      "step": 3500
    },
    {
      "epoch": 37.5,
      "learning_rate": 5.862304687499999e-05,
      "loss": 0.8844,
      "step": 3600
    },
    {
      "epoch": 38.54,
      "learning_rate": 5.7646484375e-05,
      "loss": 0.8771,
      "step": 3700
    },
    {
      "epoch": 39.58,
      "learning_rate": 5.6669921875e-05,
      "loss": 0.876,
      "step": 3800
    },
    {
      "epoch": 40.62,
      "learning_rate": 5.569335937499999e-05,
      "loss": 0.8708,
      "step": 3900
    },
    {
      "epoch": 41.66,
      "learning_rate": 5.4716796874999997e-05,
      "loss": 0.8691,
      "step": 4000
    },
    {
      "epoch": 41.66,
      "eval_loss": 0.14571049809455872,
      "eval_runtime": 195.237,
      "eval_samples_per_second": 25.84,
      "eval_steps_per_second": 0.809,
      "eval_wer": 0.12899642353961202,
      "step": 4000
    },
    {
      "epoch": 42.71,
      "learning_rate": 5.3740234374999996e-05,
      "loss": 0.8624,
      "step": 4100
    },
    {
      "epoch": 43.75,
      "learning_rate": 5.2763671874999995e-05,
      "loss": 0.8556,
      "step": 4200
    },
    {
      "epoch": 44.79,
      "learning_rate": 5.1787109375e-05,
      "loss": 0.8607,
      "step": 4300
    },
    {
      "epoch": 45.83,
      "learning_rate": 5.0810546875e-05,
      "loss": 0.8536,
      "step": 4400
    },
    {
      "epoch": 46.87,
      "learning_rate": 4.983398437499999e-05,
      "loss": 0.8493,
      "step": 4500
    },
    {
      "epoch": 47.91,
      "learning_rate": 4.8857421875e-05,
      "loss": 0.8456,
      "step": 4600
    },
    {
      "epoch": 48.96,
      "learning_rate": 4.7880859375e-05,
      "loss": 0.8333,
      "step": 4700
    },
    {
      "epoch": 50.0,
      "learning_rate": 4.6904296874999996e-05,
      "loss": 0.8346,
      "step": 4800
    },
    {
      "epoch": 51.04,
      "learning_rate": 4.5927734375e-05,
      "loss": 0.8403,
      "step": 4900
    },
    {
      "epoch": 52.08,
      "learning_rate": 4.4951171874999995e-05,
      "loss": 0.8328,
      "step": 5000
    },
    {
      "epoch": 52.08,
      "eval_loss": 0.14348936080932617,
      "eval_runtime": 197.7739,
      "eval_samples_per_second": 25.509,
      "eval_steps_per_second": 0.799,
      "eval_wer": 0.12054297171344966,
      "step": 5000
    },
    {
      "epoch": 53.12,
      "learning_rate": 4.3974609374999994e-05,
      "loss": 0.8275,
      "step": 5100
    },
    {
      "epoch": 54.17,
      "learning_rate": 4.2998046875e-05,
      "loss": 0.8262,
      "step": 5200
    },
    {
      "epoch": 55.21,
      "learning_rate": 4.2021484375e-05,
      "loss": 0.8167,
      "step": 5300
    },
    {
      "epoch": 56.25,
      "learning_rate": 4.1044921875e-05,
      "loss": 0.8194,
      "step": 5400
    },
    {
      "epoch": 57.29,
      "learning_rate": 4.0068359375e-05,
      "loss": 0.8192,
      "step": 5500
    },
    {
      "epoch": 58.33,
      "learning_rate": 3.9091796874999996e-05,
      "loss": 0.8176,
      "step": 5600
    },
    {
      "epoch": 59.37,
      "learning_rate": 3.8115234374999995e-05,
      "loss": 0.8115,
      "step": 5700
    },
    {
      "epoch": 60.42,
      "learning_rate": 3.7138671874999994e-05,
      "loss": 0.8129,
      "step": 5800
    },
    {
      "epoch": 61.46,
      "learning_rate": 3.6162109375e-05,
      "loss": 0.8052,
      "step": 5900
    },
    {
      "epoch": 62.5,
      "learning_rate": 3.518554687499999e-05,
      "loss": 0.8068,
      "step": 6000
    },
    {
      "epoch": 62.5,
      "eval_loss": 0.13501569628715515,
      "eval_runtime": 207.5786,
      "eval_samples_per_second": 24.304,
      "eval_steps_per_second": 0.761,
      "eval_wer": 0.11913406307575593,
      "step": 6000
    },
    {
      "epoch": 63.54,
      "learning_rate": 3.4208984375e-05,
      "loss": 0.8004,
      "step": 6100
    },
    {
      "epoch": 64.58,
      "learning_rate": 3.32421875e-05,
      "loss": 0.7904,
      "step": 6200
    },
    {
      "epoch": 65.62,
      "learning_rate": 3.2265625e-05,
      "loss": 0.7947,
      "step": 6300
    },
    {
      "epoch": 66.66,
      "learning_rate": 3.12890625e-05,
      "loss": 0.7981,
      "step": 6400
    },
    {
      "epoch": 67.71,
      "learning_rate": 3.0312499999999998e-05,
      "loss": 0.8018,
      "step": 6500
    },
    {
      "epoch": 68.75,
      "learning_rate": 2.93359375e-05,
      "loss": 0.7922,
      "step": 6600
    },
    {
      "epoch": 69.79,
      "learning_rate": 2.8359374999999996e-05,
      "loss": 0.796,
      "step": 6700
    },
    {
      "epoch": 70.83,
      "learning_rate": 2.73828125e-05,
      "loss": 0.7887,
      "step": 6800
    },
    {
      "epoch": 71.87,
      "learning_rate": 2.6406249999999998e-05,
      "loss": 0.7796,
      "step": 6900
    },
    {
      "epoch": 72.91,
      "learning_rate": 2.5429687499999997e-05,
      "loss": 0.7822,
      "step": 7000
    },
    {
      "epoch": 72.91,
      "eval_loss": 0.1346774846315384,
      "eval_runtime": 195.6804,
      "eval_samples_per_second": 25.782,
      "eval_steps_per_second": 0.807,
      "eval_wer": 0.11550341389400672,
      "step": 7000
    },
    {
      "epoch": 73.96,
      "learning_rate": 2.4453125e-05,
      "loss": 0.7882,
      "step": 7100
    },
    {
      "epoch": 75.0,
      "learning_rate": 2.34765625e-05,
      "loss": 0.7821,
      "step": 7200
    },
    {
      "epoch": 76.04,
      "learning_rate": 2.2499999999999998e-05,
      "loss": 0.7814,
      "step": 7300
    },
    {
      "epoch": 77.08,
      "learning_rate": 2.1523437499999997e-05,
      "loss": 0.784,
      "step": 7400
    },
    {
      "epoch": 78.12,
      "learning_rate": 2.0546875e-05,
      "loss": 0.7772,
      "step": 7500
    },
    {
      "epoch": 79.17,
      "learning_rate": 1.95703125e-05,
      "loss": 0.7785,
      "step": 7600
    },
    {
      "epoch": 80.21,
      "learning_rate": 1.8593749999999998e-05,
      "loss": 0.7731,
      "step": 7700
    },
    {
      "epoch": 81.25,
      "learning_rate": 1.7617187499999997e-05,
      "loss": 0.7726,
      "step": 7800
    },
    {
      "epoch": 82.29,
      "learning_rate": 1.6640624999999996e-05,
      "loss": 0.7765,
      "step": 7900
    },
    {
      "epoch": 83.33,
      "learning_rate": 1.56640625e-05,
      "loss": 0.7769,
      "step": 8000
    },
    {
      "epoch": 83.33,
      "eval_loss": 0.1320926398038864,
      "eval_runtime": 197.3103,
      "eval_samples_per_second": 25.569,
      "eval_steps_per_second": 0.801,
      "eval_wer": 0.11306491817492142,
      "step": 8000
    },
    {
      "epoch": 84.37,
      "learning_rate": 1.4687499999999998e-05,
      "loss": 0.7713,
      "step": 8100
    },
    {
      "epoch": 85.42,
      "learning_rate": 1.3710937499999999e-05,
      "loss": 0.773,
      "step": 8200
    },
    {
      "epoch": 86.46,
      "learning_rate": 1.2734375e-05,
      "loss": 0.7668,
      "step": 8300
    },
    {
      "epoch": 87.5,
      "learning_rate": 1.1757812499999999e-05,
      "loss": 0.7766,
      "step": 8400
    },
    {
      "epoch": 88.54,
      "learning_rate": 1.0781249999999998e-05,
      "loss": 0.7596,
      "step": 8500
    },
    {
      "epoch": 89.58,
      "learning_rate": 9.8046875e-06,
      "loss": 0.7561,
      "step": 8600
    },
    {
      "epoch": 90.62,
      "learning_rate": 8.837890624999999e-06,
      "loss": 0.7585,
      "step": 8700
    },
    {
      "epoch": 91.66,
      "learning_rate": 7.861328124999998e-06,
      "loss": 0.7524,
      "step": 8800
    },
    {
      "epoch": 92.71,
      "learning_rate": 6.884765624999999e-06,
      "loss": 0.7591,
      "step": 8900
    },
    {
      "epoch": 93.75,
      "learning_rate": 5.908203125e-06,
      "loss": 0.7678,
      "step": 9000
    },
    {
      "epoch": 93.75,
      "eval_loss": 0.13205420970916748,
      "eval_runtime": 199.4472,
      "eval_samples_per_second": 25.295,
      "eval_steps_per_second": 0.792,
      "eval_wer": 0.11146634875907663,
      "step": 9000
    }
  ],
  "max_steps": 9600,
  "num_train_epochs": 100,
  "total_flos": 1.397913952067139e+20,
  "trial_name": null,
  "trial_params": null
}