{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.999581414817916,
  "global_step": 5970,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "learning_rate": 4.9000000000000005e-05,
      "loss": 5.3488,
      "step": 100
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.900000000000001e-05,
      "loss": 3.2572,
      "step": 200
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.000149,
      "loss": 2.4392,
      "step": 300
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.000199,
      "loss": 2.2566,
      "step": 400
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.000249,
      "loss": 2.2416,
      "step": 500
    },
    {
      "epoch": 0.84,
      "eval_loss": 1.2866647243499756,
      "eval_runtime": 434.4225,
      "eval_samples_per_second": 23.912,
      "eval_steps_per_second": 2.99,
      "eval_wer": 0.8874521338587047,
      "step": 500
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.000299,
      "loss": 2.2596,
      "step": 600
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00034899999999999997,
      "loss": 2.2575,
      "step": 700
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00039900000000000005,
      "loss": 2.2978,
      "step": 800
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.000449,
      "loss": 2.2998,
      "step": 900
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.000499,
      "loss": 2.3089,
      "step": 1000
    },
    {
      "epoch": 1.67,
      "eval_loss": 1.8336485624313354,
      "eval_runtime": 430.7741,
      "eval_samples_per_second": 24.115,
      "eval_steps_per_second": 3.016,
      "eval_wer": 0.9547514660451005,
      "step": 1000
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.000549,
      "loss": 2.3156,
      "step": 1100
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.000599,
      "loss": 2.3298,
      "step": 1200
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.0006490000000000001,
      "loss": 2.3174,
      "step": 1300
    },
    {
      "epoch": 2.34,
      "learning_rate": 0.000699,
      "loss": 2.349,
      "step": 1400
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.000749,
      "loss": 2.3614,
      "step": 1500
    },
    {
      "epoch": 2.51,
      "eval_loss": 1.5936506986618042,
      "eval_runtime": 430.1389,
      "eval_samples_per_second": 24.15,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.9468893945279983,
      "step": 1500
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.000799,
      "loss": 2.3597,
      "step": 1600
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.000849,
      "loss": 2.397,
      "step": 1700
    },
    {
      "epoch": 3.02,
      "learning_rate": 0.0008990000000000001,
      "loss": 2.4454,
      "step": 1800
    },
    {
      "epoch": 3.18,
      "learning_rate": 0.000949,
      "loss": 2.4806,
      "step": 1900
    },
    {
      "epoch": 3.35,
      "learning_rate": 0.000999,
      "loss": 2.5234,
      "step": 2000
    },
    {
      "epoch": 3.35,
      "eval_loss": 1.9764641523361206,
      "eval_runtime": 434.6883,
      "eval_samples_per_second": 23.898,
      "eval_steps_per_second": 2.988,
      "eval_wer": 0.9866992248922434,
      "step": 2000
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.0009753148614609572,
      "loss": 2.5559,
      "step": 2100
    },
    {
      "epoch": 3.68,
      "learning_rate": 0.0009501259445843828,
      "loss": 2.5543,
      "step": 2200
    },
    {
      "epoch": 3.85,
      "learning_rate": 0.0009249370277078086,
      "loss": 2.5819,
      "step": 2300
    },
    {
      "epoch": 4.02,
      "learning_rate": 0.0008997481108312343,
      "loss": 2.5837,
      "step": 2400
    },
    {
      "epoch": 4.19,
      "learning_rate": 0.00087455919395466,
      "loss": 2.5373,
      "step": 2500
    },
    {
      "epoch": 4.19,
      "eval_loss": 1.9062319993972778,
      "eval_runtime": 430.3022,
      "eval_samples_per_second": 24.141,
      "eval_steps_per_second": 3.019,
      "eval_wer": 0.9916014577205542,
      "step": 2500
    },
    {
      "epoch": 4.35,
      "learning_rate": 0.0008493702770780856,
      "loss": 2.5617,
      "step": 2600
    },
    {
      "epoch": 4.52,
      "learning_rate": 0.0008241813602015113,
      "loss": 2.5553,
      "step": 2700
    },
    {
      "epoch": 4.69,
      "learning_rate": 0.0007989924433249371,
      "loss": 2.549,
      "step": 2800
    },
    {
      "epoch": 4.86,
      "learning_rate": 0.0007738035264483628,
      "loss": 2.5636,
      "step": 2900
    },
    {
      "epoch": 5.03,
      "learning_rate": 0.0007486146095717884,
      "loss": 2.5703,
      "step": 3000
    },
    {
      "epoch": 5.03,
      "eval_loss": 1.977164387702942,
      "eval_runtime": 431.1212,
      "eval_samples_per_second": 24.095,
      "eval_steps_per_second": 3.013,
      "eval_wer": 0.9914719647779197,
      "step": 3000
    },
    {
      "epoch": 5.19,
      "learning_rate": 0.0007234256926952141,
      "loss": 2.5526,
      "step": 3100
    },
    {
      "epoch": 5.36,
      "learning_rate": 0.0006982367758186398,
      "loss": 2.5277,
      "step": 3200
    },
    {
      "epoch": 5.53,
      "learning_rate": 0.0006730478589420656,
      "loss": 2.4969,
      "step": 3300
    },
    {
      "epoch": 5.69,
      "learning_rate": 0.0006478589420654912,
      "loss": 2.4849,
      "step": 3400
    },
    {
      "epoch": 5.86,
      "learning_rate": 0.0006226700251889169,
      "loss": 2.4656,
      "step": 3500
    },
    {
      "epoch": 5.86,
      "eval_loss": 1.8083465099334717,
      "eval_runtime": 432.7157,
      "eval_samples_per_second": 24.007,
      "eval_steps_per_second": 3.002,
      "eval_wer": 0.9829069315722293,
      "step": 3500
    },
    {
      "epoch": 6.03,
      "learning_rate": 0.0005974811083123426,
      "loss": 2.4858,
      "step": 3600
    },
    {
      "epoch": 6.2,
      "learning_rate": 0.0005722921914357682,
      "loss": 2.4552,
      "step": 3700
    },
    {
      "epoch": 6.37,
      "learning_rate": 0.0005471032745591939,
      "loss": 2.4302,
      "step": 3800
    },
    {
      "epoch": 6.53,
      "learning_rate": 0.0005219143576826196,
      "loss": 2.4397,
      "step": 3900
    },
    {
      "epoch": 6.7,
      "learning_rate": 0.0004967254408060454,
      "loss": 2.4339,
      "step": 4000
    },
    {
      "epoch": 6.7,
      "eval_loss": 1.754757046699524,
      "eval_runtime": 442.6863,
      "eval_samples_per_second": 23.466,
      "eval_steps_per_second": 2.934,
      "eval_wer": 0.9752483489649814,
      "step": 4000
    },
    {
      "epoch": 6.87,
      "learning_rate": 0.00047153652392947104,
      "loss": 2.4069,
      "step": 4100
    },
    {
      "epoch": 7.04,
      "learning_rate": 0.0004463476070528967,
      "loss": 2.3863,
      "step": 4200
    },
    {
      "epoch": 7.2,
      "learning_rate": 0.00042115869017632243,
      "loss": 2.3614,
      "step": 4300
    },
    {
      "epoch": 7.37,
      "learning_rate": 0.0003959697732997481,
      "loss": 2.3534,
      "step": 4400
    },
    {
      "epoch": 7.54,
      "learning_rate": 0.00037078085642317383,
      "loss": 2.344,
      "step": 4500
    },
    {
      "epoch": 7.54,
      "eval_loss": 1.6146422624588013,
      "eval_runtime": 432.0313,
      "eval_samples_per_second": 24.045,
      "eval_steps_per_second": 3.007,
      "eval_wer": 0.9638344710213294,
      "step": 4500
    },
    {
      "epoch": 7.7,
      "learning_rate": 0.0003455919395465995,
      "loss": 2.3322,
      "step": 4600
    },
    {
      "epoch": 7.87,
      "learning_rate": 0.0003204030226700252,
      "loss": 2.315,
      "step": 4700
    },
    {
      "epoch": 8.04,
      "learning_rate": 0.00029521410579345085,
      "loss": 2.3035,
      "step": 4800
    },
    {
      "epoch": 8.21,
      "learning_rate": 0.0002700251889168766,
      "loss": 2.2715,
      "step": 4900
    },
    {
      "epoch": 8.38,
      "learning_rate": 0.00024483627204030224,
      "loss": 2.2677,
      "step": 5000
    },
    {
      "epoch": 8.38,
      "eval_loss": 1.5104962587356567,
      "eval_runtime": 431.1839,
      "eval_samples_per_second": 24.092,
      "eval_steps_per_second": 3.013,
      "eval_wer": 0.9499232291840095,
      "step": 5000
    },
    {
      "epoch": 8.54,
      "learning_rate": 0.00021964735516372797,
      "loss": 2.266,
      "step": 5100
    },
    {
      "epoch": 8.71,
      "learning_rate": 0.00019445843828715364,
      "loss": 2.2473,
      "step": 5200
    },
    {
      "epoch": 8.88,
      "learning_rate": 0.00016926952141057937,
      "loss": 2.2419,
      "step": 5300
    },
    {
      "epoch": 9.05,
      "learning_rate": 0.00014408060453400504,
      "loss": 2.2305,
      "step": 5400
    },
    {
      "epoch": 9.21,
      "learning_rate": 0.00011889168765743074,
      "loss": 2.2074,
      "step": 5500
    },
    {
      "epoch": 9.21,
      "eval_loss": 1.4190884828567505,
      "eval_runtime": 440.4999,
      "eval_samples_per_second": 23.582,
      "eval_steps_per_second": 2.949,
      "eval_wer": 0.9356790054942006,
      "step": 5500
    },
    {
      "epoch": 9.38,
      "learning_rate": 9.370277078085642e-05,
      "loss": 2.1936,
      "step": 5600
    },
    {
      "epoch": 9.55,
      "learning_rate": 6.851385390428212e-05,
      "loss": 2.1796,
      "step": 5700
    },
    {
      "epoch": 9.71,
      "learning_rate": 4.332493702770781e-05,
      "loss": 2.1748,
      "step": 5800
    },
    {
      "epoch": 9.88,
      "learning_rate": 1.8136020151133502e-05,
      "loss": 2.1846,
      "step": 5900
    },
    {
      "epoch": 10.0,
      "step": 5970,
      "total_flos": 1.0051413716540667e+20,
      "train_loss": 2.4458747799871756,
      "train_runtime": 25997.2696,
      "train_samples_per_second": 14.697,
      "train_steps_per_second": 0.23
    }
  ],
  "max_steps": 5970,
  "num_train_epochs": 10,
  "total_flos": 1.0051413716540667e+20,
  "trial_name": null,
  "trial_params": null
}