{
  "best_metric": 0.4493387111903199,
  "best_model_checkpoint": "/workspace/cv-corpus-8.0-2022-01-19/output/checkpoint-1000",
  "epoch": 10.0,
  "global_step": 15630,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 9.92e-05,
      "loss": 6.7916,
      "step": 500
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00019920000000000002,
      "loss": 1.354,
      "step": 1000
    },
    {
      "epoch": 0.64,
      "eval_loss": 0.4108898937702179,
      "eval_runtime": 71.0896,
      "eval_samples_per_second": 146.125,
      "eval_steps_per_second": 1.829,
      "eval_wer": 0.4493387111903199,
      "step": 1000
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00019321941216678059,
      "loss": 0.7084,
      "step": 1500
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00018638414217361587,
      "loss": 0.5886,
      "step": 2000
    },
    {
      "epoch": 1.28,
      "eval_loss": 0.2797781527042389,
      "eval_runtime": 71.0704,
      "eval_samples_per_second": 146.165,
      "eval_steps_per_second": 1.829,
      "eval_wer": 0.3099334021198762,
      "step": 2000
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00017954887218045112,
      "loss": 0.5386,
      "step": 2500
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0001727136021872864,
      "loss": 0.4977,
      "step": 3000
    },
    {
      "epoch": 1.92,
      "eval_loss": 0.23867885768413544,
      "eval_runtime": 71.0833,
      "eval_samples_per_second": 146.138,
      "eval_steps_per_second": 1.829,
      "eval_wer": 0.2673295188068662,
      "step": 3000
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.00016587833219412168,
      "loss": 0.4531,
      "step": 3500
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00015905673274094328,
      "loss": 0.4253,
      "step": 4000
    },
    {
      "epoch": 2.56,
      "eval_loss": 0.22657370567321777,
      "eval_runtime": 71.5875,
      "eval_samples_per_second": 145.109,
      "eval_steps_per_second": 1.816,
      "eval_wer": 0.2523215458212175,
      "step": 4000
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00015222146274777856,
      "loss": 0.413,
      "step": 4500
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.0001453861927546138,
      "loss": 0.3942,
      "step": 5000
    },
    {
      "epoch": 3.2,
      "eval_loss": 0.21706202626228333,
      "eval_runtime": 72.0993,
      "eval_samples_per_second": 144.079,
      "eval_steps_per_second": 1.803,
      "eval_wer": 0.2437294812869337,
      "step": 5000
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.00013855092276144907,
      "loss": 0.3741,
      "step": 5500
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.00013171565276828435,
      "loss": 0.3619,
      "step": 6000
    },
    {
      "epoch": 3.84,
      "eval_loss": 0.20762862265110016,
      "eval_runtime": 72.0546,
      "eval_samples_per_second": 144.168,
      "eval_steps_per_second": 1.804,
      "eval_wer": 0.22530719444705,
      "step": 6000
    },
    {
      "epoch": 4.16,
      "learning_rate": 0.00012489405331510595,
      "loss": 0.3435,
      "step": 6500
    },
    {
      "epoch": 4.48,
      "learning_rate": 0.00011805878332194122,
      "loss": 0.3245,
      "step": 7000
    },
    {
      "epoch": 4.48,
      "eval_loss": 0.2087564468383789,
      "eval_runtime": 71.9965,
      "eval_samples_per_second": 144.285,
      "eval_steps_per_second": 1.806,
      "eval_wer": 0.21862864646843636,
      "step": 7000
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.0001112235133287765,
      "loss": 0.3135,
      "step": 7500
    },
    {
      "epoch": 5.12,
      "learning_rate": 0.0001044019138755981,
      "loss": 0.308,
      "step": 8000
    },
    {
      "epoch": 5.12,
      "eval_loss": 0.2086208015680313,
      "eval_runtime": 68.9232,
      "eval_samples_per_second": 150.718,
      "eval_steps_per_second": 1.886,
      "eval_wer": 0.22063596285526685,
      "step": 8000
    },
    {
      "epoch": 5.44,
      "learning_rate": 9.756664388243337e-05,
      "loss": 0.292,
      "step": 8500
    },
    {
      "epoch": 5.76,
      "learning_rate": 9.073137388926864e-05,
      "loss": 0.2881,
      "step": 9000
    },
    {
      "epoch": 5.76,
      "eval_loss": 0.20888157188892365,
      "eval_runtime": 70.2391,
      "eval_samples_per_second": 147.895,
      "eval_steps_per_second": 1.851,
      "eval_wer": 0.21048682112372197,
      "step": 9000
    },
    {
      "epoch": 6.08,
      "learning_rate": 8.389610389610389e-05,
      "loss": 0.2717,
      "step": 9500
    },
    {
      "epoch": 6.4,
      "learning_rate": 7.706083390293917e-05,
      "loss": 0.2557,
      "step": 10000
    },
    {
      "epoch": 6.4,
      "eval_loss": 0.20148096978664398,
      "eval_runtime": 70.1726,
      "eval_samples_per_second": 148.035,
      "eval_steps_per_second": 1.853,
      "eval_wer": 0.20035643935840916,
      "step": 10000
    },
    {
      "epoch": 6.72,
      "learning_rate": 7.022556390977444e-05,
      "loss": 0.2536,
      "step": 10500
    },
    {
      "epoch": 7.04,
      "learning_rate": 6.33902939166097e-05,
      "loss": 0.248,
      "step": 11000
    },
    {
      "epoch": 7.04,
      "eval_loss": 0.2043762356042862,
      "eval_runtime": 70.2184,
      "eval_samples_per_second": 147.938,
      "eval_steps_per_second": 1.851,
      "eval_wer": 0.19529124847575274,
      "step": 11000
    },
    {
      "epoch": 7.36,
      "learning_rate": 5.655502392344498e-05,
      "loss": 0.2308,
      "step": 11500
    },
    {
      "epoch": 7.68,
      "learning_rate": 4.971975393028025e-05,
      "loss": 0.2251,
      "step": 12000
    },
    {
      "epoch": 7.68,
      "eval_loss": 0.20575200021266937,
      "eval_runtime": 70.8946,
      "eval_samples_per_second": 146.527,
      "eval_steps_per_second": 1.834,
      "eval_wer": 0.19315261232529782,
      "step": 12000
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.291182501708818e-05,
      "loss": 0.2207,
      "step": 12500
    },
    {
      "epoch": 8.32,
      "learning_rate": 3.6076555023923446e-05,
      "loss": 0.2052,
      "step": 13000
    },
    {
      "epoch": 8.32,
      "eval_loss": 0.21170856058597565,
      "eval_runtime": 70.7957,
      "eval_samples_per_second": 146.732,
      "eval_steps_per_second": 1.836,
      "eval_wer": 0.18776850201669637,
      "step": 13000
    },
    {
      "epoch": 8.64,
      "learning_rate": 2.9241285030758714e-05,
      "loss": 0.2026,
      "step": 13500
    },
    {
      "epoch": 8.96,
      "learning_rate": 2.2406015037593985e-05,
      "loss": 0.1976,
      "step": 14000
    },
    {
      "epoch": 8.96,
      "eval_loss": 0.21043309569358826,
      "eval_runtime": 71.1895,
      "eval_samples_per_second": 145.92,
      "eval_steps_per_second": 1.826,
      "eval_wer": 0.18249695150548728,
      "step": 14000
    },
    {
      "epoch": 9.28,
      "learning_rate": 1.5570745044429256e-05,
      "loss": 0.1875,
      "step": 14500
    },
    {
      "epoch": 9.6,
      "learning_rate": 8.735475051264526e-06,
      "loss": 0.1845,
      "step": 15000
    },
    {
      "epoch": 9.6,
      "eval_loss": 0.21563756465911865,
      "eval_runtime": 71.0722,
      "eval_samples_per_second": 146.161,
      "eval_steps_per_second": 1.829,
      "eval_wer": 0.18212175218084609,
      "step": 15000
    },
    {
      "epoch": 9.92,
      "learning_rate": 1.9138755980861244e-06,
      "loss": 0.1837,
      "step": 15500
    },
    {
      "epoch": 10.0,
      "step": 15630,
      "total_flos": 9.942412569719006e+20,
      "train_loss": 0.1316310991176183,
      "train_runtime": 23113.6031,
      "train_samples_per_second": 173.054,
      "train_steps_per_second": 0.676
    }
  ],
  "max_steps": 15630,
  "num_train_epochs": 10,
  "total_flos": 9.942412569719006e+20,
  "trial_name": null,
  "trial_params": null
}