{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.15533980582525,
  "global_step": 5108,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.95,
      "learning_rate": 1.487205882352941e-05,
      "loss": 16.4084,
      "step": 100
    },
    {
      "epoch": 3.91,
      "learning_rate": 2.9430882352941174e-05,
      "loss": 6.2142,
      "step": 200
    },
    {
      "epoch": 5.87,
      "learning_rate": 4.398970588235294e-05,
      "loss": 4.1261,
      "step": 300
    },
    {
      "epoch": 7.83,
      "learning_rate": 5.8548529411764694e-05,
      "loss": 3.2894,
      "step": 400
    },
    {
      "epoch": 7.83,
      "eval_cer": 1.0,
      "eval_loss": 3.1501412391662598,
      "eval_runtime": 111.2121,
      "eval_samples_per_second": 25.141,
      "eval_steps_per_second": 0.396,
      "eval_wer": 1.0,
      "step": 400
    },
    {
      "epoch": 9.8,
      "learning_rate": 7.310735294117646e-05,
      "loss": 3.083,
      "step": 500
    },
    {
      "epoch": 11.76,
      "learning_rate": 7.5e-05,
      "loss": 2.9638,
      "step": 600
    },
    {
      "epoch": 13.72,
      "learning_rate": 7.5e-05,
      "loss": 2.534,
      "step": 700
    },
    {
      "epoch": 15.68,
      "learning_rate": 7.5e-05,
      "loss": 1.8586,
      "step": 800
    },
    {
      "epoch": 15.68,
      "eval_cer": 0.24023761138033453,
      "eval_loss": 0.8871385455131531,
      "eval_runtime": 114.0709,
      "eval_samples_per_second": 24.511,
      "eval_steps_per_second": 0.386,
      "eval_wer": 0.6721492743607463,
      "step": 800
    },
    {
      "epoch": 17.64,
      "learning_rate": 7.5e-05,
      "loss": 1.6008,
      "step": 900
    },
    {
      "epoch": 19.6,
      "learning_rate": 7.5e-05,
      "loss": 1.4898,
      "step": 1000
    },
    {
      "epoch": 21.56,
      "learning_rate": 7.5e-05,
      "loss": 1.4105,
      "step": 1100
    },
    {
      "epoch": 23.52,
      "learning_rate": 7.5e-05,
      "loss": 1.3431,
      "step": 1200
    },
    {
      "epoch": 23.52,
      "eval_cer": 0.19387212755979366,
      "eval_loss": 0.5813264846801758,
      "eval_runtime": 110.7438,
      "eval_samples_per_second": 25.247,
      "eval_steps_per_second": 0.397,
      "eval_wer": 0.5502418797512094,
      "step": 1200
    },
    {
      "epoch": 25.49,
      "learning_rate": 7.5e-05,
      "loss": 1.2993,
      "step": 1300
    },
    {
      "epoch": 27.45,
      "learning_rate": 7.5e-05,
      "loss": 1.2563,
      "step": 1400
    },
    {
      "epoch": 29.41,
      "learning_rate": 7.5e-05,
      "loss": 1.2298,
      "step": 1500
    },
    {
      "epoch": 31.37,
      "learning_rate": 7.5e-05,
      "loss": 1.2052,
      "step": 1600
    },
    {
      "epoch": 31.37,
      "eval_cer": 0.1664738679589391,
      "eval_loss": 0.49559420347213745,
      "eval_runtime": 111.374,
      "eval_samples_per_second": 25.105,
      "eval_steps_per_second": 0.395,
      "eval_wer": 0.47878369039391844,
      "step": 1600
    },
    {
      "epoch": 33.33,
      "learning_rate": 7.5e-05,
      "loss": 1.186,
      "step": 1700
    },
    {
      "epoch": 35.29,
      "learning_rate": 7.5e-05,
      "loss": 1.1473,
      "step": 1800
    },
    {
      "epoch": 37.25,
      "learning_rate": 7.5e-05,
      "loss": 1.1235,
      "step": 1900
    },
    {
      "epoch": 39.21,
      "learning_rate": 7.5e-05,
      "loss": 1.1097,
      "step": 2000
    },
    {
      "epoch": 39.21,
      "eval_cer": 0.1397321661195352,
      "eval_loss": 0.44468843936920166,
      "eval_runtime": 109.918,
      "eval_samples_per_second": 25.437,
      "eval_steps_per_second": 0.4,
      "eval_wer": 0.41428242340474547,
      "step": 2000
    },
    {
      "epoch": 41.17,
      "learning_rate": 7.5e-05,
      "loss": 1.0951,
      "step": 2100
    },
    {
      "epoch": 43.14,
      "learning_rate": 7.5e-05,
      "loss": 1.0785,
      "step": 2200
    },
    {
      "epoch": 45.1,
      "learning_rate": 7.5e-05,
      "loss": 1.0575,
      "step": 2300
    },
    {
      "epoch": 47.06,
      "learning_rate": 7.5e-05,
      "loss": 1.0528,
      "step": 2400
    },
    {
      "epoch": 47.06,
      "eval_cer": 0.13331249022979522,
      "eval_loss": 0.4439217448234558,
      "eval_runtime": 111.2432,
      "eval_samples_per_second": 25.134,
      "eval_steps_per_second": 0.396,
      "eval_wer": 0.3960838516470859,
      "step": 2400
    },
    {
      "epoch": 49.02,
      "learning_rate": 7.5e-05,
      "loss": 1.0359,
      "step": 2500
    },
    {
      "epoch": 50.97,
      "learning_rate": 7.377299412915851e-05,
      "loss": 1.0118,
      "step": 2600
    },
    {
      "epoch": 52.93,
      "learning_rate": 7.098434442270058e-05,
      "loss": 1.0083,
      "step": 2700
    },
    {
      "epoch": 54.89,
      "learning_rate": 6.819569471624266e-05,
      "loss": 0.9939,
      "step": 2800
    },
    {
      "epoch": 54.89,
      "eval_cer": 0.13785628680110468,
      "eval_loss": 0.43479105830192566,
      "eval_runtime": 112.3814,
      "eval_samples_per_second": 24.88,
      "eval_steps_per_second": 0.392,
      "eval_wer": 0.40138217000691084,
      "step": 2800
    },
    {
      "epoch": 56.85,
      "learning_rate": 6.540704500978474e-05,
      "loss": 0.9884,
      "step": 2900
    },
    {
      "epoch": 58.82,
      "learning_rate": 6.26183953033268e-05,
      "loss": 0.9771,
      "step": 3000
    },
    {
      "epoch": 60.78,
      "learning_rate": 5.982974559686888e-05,
      "loss": 0.9577,
      "step": 3100
    },
    {
      "epoch": 62.74,
      "learning_rate": 5.704109589041095e-05,
      "loss": 0.9441,
      "step": 3200
    },
    {
      "epoch": 62.74,
      "eval_cer": 0.12230733156166954,
      "eval_loss": 0.42357733845710754,
      "eval_runtime": 114.6173,
      "eval_samples_per_second": 24.394,
      "eval_steps_per_second": 0.384,
      "eval_wer": 0.365261460492974,
      "step": 3200
    },
    {
      "epoch": 64.7,
      "learning_rate": 5.425244618395303e-05,
      "loss": 0.9296,
      "step": 3300
    },
    {
      "epoch": 66.66,
      "learning_rate": 5.14637964774951e-05,
      "loss": 0.9203,
      "step": 3400
    },
    {
      "epoch": 68.62,
      "learning_rate": 4.867514677103718e-05,
      "loss": 0.9215,
      "step": 3500
    },
    {
      "epoch": 70.58,
      "learning_rate": 4.5886497064579256e-05,
      "loss": 0.913,
      "step": 3600
    },
    {
      "epoch": 70.58,
      "eval_cer": 0.11566880308477932,
      "eval_loss": 0.43085047602653503,
      "eval_runtime": 112.648,
      "eval_samples_per_second": 24.821,
      "eval_steps_per_second": 0.391,
      "eval_wer": 0.3475236120709514,
      "step": 3600
    },
    {
      "epoch": 72.54,
      "learning_rate": 4.309784735812133e-05,
      "loss": 0.9033,
      "step": 3700
    },
    {
      "epoch": 74.5,
      "learning_rate": 4.03091976516634e-05,
      "loss": 0.8945,
      "step": 3800
    },
    {
      "epoch": 76.47,
      "learning_rate": 3.752054794520548e-05,
      "loss": 0.8714,
      "step": 3900
    },
    {
      "epoch": 78.43,
      "learning_rate": 3.473189823874755e-05,
      "loss": 0.8678,
      "step": 4000
    },
    {
      "epoch": 78.43,
      "eval_cer": 0.1109895263404721,
      "eval_loss": 0.42695942521095276,
      "eval_runtime": 109.8691,
      "eval_samples_per_second": 25.448,
      "eval_steps_per_second": 0.4,
      "eval_wer": 0.3337479843354066,
      "step": 4000
    },
    {
      "epoch": 80.39,
      "learning_rate": 3.1943248532289626e-05,
      "loss": 0.8612,
      "step": 4100
    },
    {
      "epoch": 82.35,
      "learning_rate": 2.9154598825831697e-05,
      "loss": 0.8603,
      "step": 4200
    },
    {
      "epoch": 84.31,
      "learning_rate": 2.6365949119373778e-05,
      "loss": 0.8451,
      "step": 4300
    },
    {
      "epoch": 86.27,
      "learning_rate": 2.357729941291585e-05,
      "loss": 0.8414,
      "step": 4400
    },
    {
      "epoch": 86.27,
      "eval_cer": 0.10695638580584649,
      "eval_loss": 0.41582536697387695,
      "eval_runtime": 110.6892,
      "eval_samples_per_second": 25.26,
      "eval_steps_per_second": 0.398,
      "eval_wer": 0.32204561161022804,
      "step": 4400
    },
    {
      "epoch": 88.23,
      "learning_rate": 2.0788649706457926e-05,
      "loss": 0.8394,
      "step": 4500
    },
    {
      "epoch": 90.19,
      "learning_rate": 1.7999999999999997e-05,
      "loss": 0.8304,
      "step": 4600
    },
    {
      "epoch": 92.16,
      "learning_rate": 1.5211350293542076e-05,
      "loss": 0.8188,
      "step": 4700
    },
    {
      "epoch": 94.12,
      "learning_rate": 1.2422700587084147e-05,
      "loss": 0.817,
      "step": 4800
    },
    {
      "epoch": 94.12,
      "eval_cer": 0.10721692460007295,
      "eval_loss": 0.4184626638889313,
      "eval_runtime": 112.2012,
      "eval_samples_per_second": 24.92,
      "eval_steps_per_second": 0.392,
      "eval_wer": 0.323105275282193,
      "step": 4800
    },
    {
      "epoch": 96.08,
      "learning_rate": 9.634050880626226e-06,
      "loss": 0.8126,
      "step": 4900
    },
    {
      "epoch": 98.04,
      "learning_rate": 6.845401174168297e-06,
      "loss": 0.8105,
      "step": 5000
    },
    {
      "epoch": 99.99,
      "learning_rate": 4.056751467710376e-06,
      "loss": 0.7959,
      "step": 5100
    },
    {
      "epoch": 100.16,
      "step": 5108,
      "total_flos": 8.553278225483388e+19,
      "train_loss": 1.6543282796931436,
      "train_runtime": 29157.3737,
      "train_samples_per_second": 22.424,
      "train_steps_per_second": 0.175
    }
  ],
  "max_steps": 5108,
  "num_train_epochs": 101,
  "total_flos": 8.553278225483388e+19,
  "trial_name": null,
  "trial_params": null
}