{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.5,
      "learning_rate": 0.00019800000000000002,
      "loss": 4.9661,
      "step": 100
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.000398,
      "loss": 2.9221,
      "step": 200
    },
    {
      "epoch": 7.5,
      "learning_rate": 0.000598,
      "loss": 1.6582,
      "step": 300
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.0007980000000000001,
      "loss": 0.4613,
      "step": 400
    },
    {
      "epoch": 12.5,
      "learning_rate": 0.000998,
      "loss": 0.3082,
      "step": 500
    },
    {
      "epoch": 12.5,
      "eval_loss": 0.38708925247192383,
      "eval_runtime": 140.6014,
      "eval_samples_per_second": 18.584,
      "eval_steps_per_second": 0.263,
      "eval_wer": 0.4907113872685772,
      "step": 500
    },
    {
      "epoch": 15.0,
      "learning_rate": 0.0009980271764103532,
      "loss": 0.2368,
      "step": 600
    },
    {
      "epoch": 17.5,
      "learning_rate": 0.000992044732251972,
      "loss": 0.202,
      "step": 700
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.0009821006332271156,
      "loss": 0.1745,
      "step": 800
    },
    {
      "epoch": 22.5,
      "learning_rate": 0.0009682749433740962,
      "loss": 0.1594,
      "step": 900
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.0009506789790182364,
      "loss": 0.1497,
      "step": 1000
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.41679587960243225,
      "eval_runtime": 138.5858,
      "eval_samples_per_second": 18.855,
      "eval_steps_per_second": 0.267,
      "eval_wer": 0.4278468171443064,
      "step": 1000
    },
    {
      "epoch": 27.5,
      "learning_rate": 0.00092945441251827,
      "loss": 0.1406,
      "step": 1100
    },
    {
      "epoch": 30.0,
      "learning_rate": 0.0009047721316038118,
      "loss": 0.1308,
      "step": 1200
    },
    {
      "epoch": 32.5,
      "learning_rate": 0.0008768308634878388,
      "loss": 0.1285,
      "step": 1300
    },
    {
      "epoch": 35.0,
      "learning_rate": 0.0008458555748320216,
      "loss": 0.1213,
      "step": 1400
    },
    {
      "epoch": 37.5,
      "learning_rate": 0.0008120956604474414,
      "loss": 0.1243,
      "step": 1500
    },
    {
      "epoch": 37.5,
      "eval_loss": 0.4445520341396332,
      "eval_runtime": 137.9299,
      "eval_samples_per_second": 18.944,
      "eval_steps_per_second": 0.268,
      "eval_wer": 0.4220136951559726,
      "step": 1500
    },
    {
      "epoch": 40.0,
      "learning_rate": 0.0007758229353142152,
      "loss": 0.1215,
      "step": 1600
    },
    {
      "epoch": 42.5,
      "learning_rate": 0.0007373294460870985,
      "loss": 0.1185,
      "step": 1700
    },
    {
      "epoch": 45.0,
      "learning_rate": 0.0006969251197075427,
      "loss": 0.1064,
      "step": 1800
    },
    {
      "epoch": 47.5,
      "learning_rate": 0.0006549352680541975,
      "loss": 0.1085,
      "step": 1900
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.000611697968722942,
      "loss": 0.0954,
      "step": 2000
    },
    {
      "epoch": 50.0,
      "eval_loss": 0.44264093041419983,
      "eval_runtime": 138.1747,
      "eval_samples_per_second": 18.911,
      "eval_steps_per_second": 0.268,
      "eval_wer": 0.39459168146081663,
      "step": 2000
    },
    {
      "epoch": 52.5,
      "learning_rate": 0.0005675613430248713,
      "loss": 0.0923,
      "step": 2100
    },
    {
      "epoch": 55.0,
      "learning_rate": 0.0005228807531181908,
      "loss": 0.0847,
      "step": 2200
    },
    {
      "epoch": 57.5,
      "learning_rate": 0.00047801594084106763,
      "loss": 0.0907,
      "step": 2300
    },
    {
      "epoch": 60.0,
      "learning_rate": 0.0004333281312818746,
      "loss": 0.0833,
      "step": 2400
    },
    {
      "epoch": 62.5,
      "learning_rate": 0.00038917712440717607,
      "loss": 0.0741,
      "step": 2500
    },
    {
      "epoch": 62.5,
      "eval_loss": 0.4501730799674988,
      "eval_runtime": 137.8839,
      "eval_samples_per_second": 18.951,
      "eval_steps_per_second": 0.268,
      "eval_wer": 0.3800405782399188,
      "step": 2500
    },
    {
      "epoch": 65.0,
      "learning_rate": 0.00034591839816395533,
      "loss": 0.072,
      "step": 2600
    },
    {
      "epoch": 67.5,
      "learning_rate": 0.00030390024638020374,
      "loss": 0.0684,
      "step": 2700
    },
    {
      "epoch": 70.0,
      "learning_rate": 0.0002634609745078109,
      "loss": 0.062,
      "step": 2800
    },
    {
      "epoch": 72.5,
      "learning_rate": 0.00022492617578598646,
      "loss": 0.06,
      "step": 2900
    },
    {
      "epoch": 75.0,
      "learning_rate": 0.00018860610975594382,
      "loss": 0.0533,
      "step": 3000
    },
    {
      "epoch": 75.0,
      "eval_loss": 0.4618484675884247,
      "eval_runtime": 138.9018,
      "eval_samples_per_second": 18.812,
      "eval_steps_per_second": 0.266,
      "eval_wer": 0.3653309662693381,
      "step": 3000
    },
    {
      "epoch": 77.5,
      "learning_rate": 0.0001547932042335039,
      "loss": 0.0542,
      "step": 3100
    },
    {
      "epoch": 80.0,
      "learning_rate": 0.00012375970085226701,
      "loss": 0.0514,
      "step": 3200
    },
    {
      "epoch": 82.5,
      "learning_rate": 9.575546313405425e-05,
      "loss": 0.0501,
      "step": 3300
    },
    {
      "epoch": 85.0,
      "learning_rate": 7.100596473474763e-05,
      "loss": 0.0466,
      "step": 3400
    },
    {
      "epoch": 87.5,
      "learning_rate": 4.9710474062988955e-05,
      "loss": 0.0447,
      "step": 3500
    },
    {
      "epoch": 87.5,
      "eval_loss": 0.4518083930015564,
      "eval_runtime": 138.0899,
      "eval_samples_per_second": 18.922,
      "eval_steps_per_second": 0.268,
      "eval_wer": 0.3460563023078874,
      "step": 3500
    },
    {
      "epoch": 90.0,
      "learning_rate": 3.204044988812144e-05,
      "loss": 0.0414,
      "step": 3600
    },
    {
      "epoch": 92.5,
      "learning_rate": 1.8138160854995144e-05,
      "loss": 0.0415,
      "step": 3700
    },
    {
      "epoch": 95.0,
      "learning_rate": 8.115540020491363e-06,
      "loss": 0.0406,
      "step": 3800
    },
    {
      "epoch": 97.5,
      "learning_rate": 2.053283634363745e-06,
      "loss": 0.0416,
      "step": 3900
    },
    {
      "epoch": 100.0,
      "learning_rate": 2.0142048445803695e-10,
      "loss": 0.0396,
      "step": 4000
    },
    {
      "epoch": 100.0,
      "eval_loss": 0.46233049035072327,
      "eval_runtime": 138.5502,
      "eval_samples_per_second": 18.86,
      "eval_steps_per_second": 0.267,
      "eval_wer": 0.34199847831600305,
      "step": 4000
    },
    {
      "epoch": 100.0,
      "step": 4000,
      "total_flos": 1.2065530437849294e+20,
      "train_loss": 0.3406678358316422,
      "train_runtime": 40685.8922,
      "train_samples_per_second": 14.049,
      "train_steps_per_second": 0.098
    }
  ],
  "max_steps": 4000,
  "num_train_epochs": 100,
  "total_flos": 1.2065530437849294e+20,
  "trial_name": null,
  "trial_params": null
}