{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.536067892503536,
  "eval_steps": 100,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14144271570014144,
      "eval_loss": 6.970242977142334,
      "eval_runtime": 137.617,
      "eval_samples_per_second": 41.1,
      "eval_steps_per_second": 5.137,
      "eval_wer": 1.0,
      "step": 100
    },
    {
      "epoch": 0.2828854314002829,
      "eval_loss": 3.36755633354187,
      "eval_runtime": 133.5904,
      "eval_samples_per_second": 42.338,
      "eval_steps_per_second": 5.292,
      "eval_wer": 1.0,
      "step": 200
    },
    {
      "epoch": 0.4243281471004243,
      "eval_loss": 3.0392866134643555,
      "eval_runtime": 133.8525,
      "eval_samples_per_second": 42.255,
      "eval_steps_per_second": 5.282,
      "eval_wer": 1.0,
      "step": 300
    },
    {
      "epoch": 0.5657708628005658,
      "eval_loss": 2.9887070655822754,
      "eval_runtime": 134.092,
      "eval_samples_per_second": 42.18,
      "eval_steps_per_second": 5.272,
      "eval_wer": 1.0,
      "step": 400
    },
    {
      "epoch": 0.7072135785007072,
      "grad_norm": 0.6687237620353699,
      "learning_rate": 0.00029699999999999996,
      "loss": 4.717,
      "step": 500
    },
    {
      "epoch": 0.7072135785007072,
      "eval_loss": 3.0226564407348633,
      "eval_runtime": 134.7626,
      "eval_samples_per_second": 41.97,
      "eval_steps_per_second": 5.246,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 0.8486562942008486,
      "eval_loss": 3.0405795574188232,
      "eval_runtime": 135.9502,
      "eval_samples_per_second": 41.603,
      "eval_steps_per_second": 5.2,
      "eval_wer": 1.0,
      "step": 600
    },
    {
      "epoch": 0.9900990099009901,
      "eval_loss": 3.0028798580169678,
      "eval_runtime": 135.0397,
      "eval_samples_per_second": 41.884,
      "eval_steps_per_second": 5.235,
      "eval_wer": 1.0,
      "step": 700
    },
    {
      "epoch": 1.1315417256011315,
      "eval_loss": 2.948317050933838,
      "eval_runtime": 135.3684,
      "eval_samples_per_second": 41.782,
      "eval_steps_per_second": 5.223,
      "eval_wer": 1.0,
      "step": 800
    },
    {
      "epoch": 1.272984441301273,
      "eval_loss": 2.9510738849639893,
      "eval_runtime": 135.3897,
      "eval_samples_per_second": 41.776,
      "eval_steps_per_second": 5.222,
      "eval_wer": 1.0,
      "step": 900
    },
    {
      "epoch": 1.4144271570014144,
      "grad_norm": 0.3827725648880005,
      "learning_rate": 0.00022574999999999996,
      "loss": 3.0065,
      "step": 1000
    },
    {
      "epoch": 1.4144271570014144,
      "eval_loss": 2.9472572803497314,
      "eval_runtime": 134.8251,
      "eval_samples_per_second": 41.951,
      "eval_steps_per_second": 5.244,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 1.5558698727015559,
      "eval_loss": 2.9448139667510986,
      "eval_runtime": 135.1998,
      "eval_samples_per_second": 41.834,
      "eval_steps_per_second": 5.229,
      "eval_wer": 1.0,
      "step": 1100
    },
    {
      "epoch": 1.6973125884016973,
      "eval_loss": 2.946981191635132,
      "eval_runtime": 135.1335,
      "eval_samples_per_second": 41.855,
      "eval_steps_per_second": 5.232,
      "eval_wer": 1.0,
      "step": 1200
    },
    {
      "epoch": 1.8387553041018387,
      "eval_loss": 2.944624662399292,
      "eval_runtime": 135.5596,
      "eval_samples_per_second": 41.723,
      "eval_steps_per_second": 5.215,
      "eval_wer": 1.0,
      "step": 1300
    },
    {
      "epoch": 1.9801980198019802,
      "eval_loss": 2.9431614875793457,
      "eval_runtime": 135.6031,
      "eval_samples_per_second": 41.71,
      "eval_steps_per_second": 5.214,
      "eval_wer": 1.0,
      "step": 1400
    },
    {
      "epoch": 2.1216407355021216,
      "grad_norm": 0.5749518275260925,
      "learning_rate": 0.0001512,
      "loss": 2.9634,
      "step": 1500
    },
    {
      "epoch": 2.1216407355021216,
      "eval_loss": 2.9475975036621094,
      "eval_runtime": 136.0526,
      "eval_samples_per_second": 41.572,
      "eval_steps_per_second": 5.197,
      "eval_wer": 1.0,
      "step": 1500
    },
    {
      "epoch": 2.263083451202263,
      "eval_loss": 2.9624321460723877,
      "eval_runtime": 136.0863,
      "eval_samples_per_second": 41.562,
      "eval_steps_per_second": 5.195,
      "eval_wer": 1.0,
      "step": 1600
    },
    {
      "epoch": 2.4045261669024045,
      "eval_loss": 2.9580626487731934,
      "eval_runtime": 136.566,
      "eval_samples_per_second": 41.416,
      "eval_steps_per_second": 5.177,
      "eval_wer": 1.0,
      "step": 1700
    },
    {
      "epoch": 2.545968882602546,
      "eval_loss": 2.9552838802337646,
      "eval_runtime": 136.3048,
      "eval_samples_per_second": 41.495,
      "eval_steps_per_second": 5.187,
      "eval_wer": 1.0,
      "step": 1800
    },
    {
      "epoch": 2.6874115983026874,
      "eval_loss": 2.9515163898468018,
      "eval_runtime": 136.0496,
      "eval_samples_per_second": 41.573,
      "eval_steps_per_second": 5.197,
      "eval_wer": 1.0,
      "step": 1900
    },
    {
      "epoch": 2.828854314002829,
      "grad_norm": 0.206673726439476,
      "learning_rate": 7.664999999999999e-05,
      "loss": 2.9677,
      "step": 2000
    },
    {
      "epoch": 2.828854314002829,
      "eval_loss": 2.9480981826782227,
      "eval_runtime": 134.8264,
      "eval_samples_per_second": 41.95,
      "eval_steps_per_second": 5.244,
      "eval_wer": 1.0,
      "step": 2000
    },
    {
      "epoch": 2.9702970297029703,
      "eval_loss": 2.9509432315826416,
      "eval_runtime": 134.773,
      "eval_samples_per_second": 41.967,
      "eval_steps_per_second": 5.246,
      "eval_wer": 1.0,
      "step": 2100
    },
    {
      "epoch": 3.1117397454031117,
      "eval_loss": 2.940824031829834,
      "eval_runtime": 135.008,
      "eval_samples_per_second": 41.894,
      "eval_steps_per_second": 5.237,
      "eval_wer": 1.0,
      "step": 2200
    },
    {
      "epoch": 3.253182461103253,
      "eval_loss": 2.9393398761749268,
      "eval_runtime": 134.9605,
      "eval_samples_per_second": 41.909,
      "eval_steps_per_second": 5.239,
      "eval_wer": 1.0,
      "step": 2300
    },
    {
      "epoch": 3.3946251768033946,
      "eval_loss": 2.938136339187622,
      "eval_runtime": 134.6374,
      "eval_samples_per_second": 42.009,
      "eval_steps_per_second": 5.251,
      "eval_wer": 1.0,
      "step": 2400
    },
    {
      "epoch": 3.536067892503536,
      "grad_norm": 0.6203744411468506,
      "learning_rate": 1.9499999999999995e-06,
      "loss": 2.9612,
      "step": 2500
    },
    {
      "epoch": 3.536067892503536,
      "eval_loss": 2.9374959468841553,
      "eval_runtime": 134.9253,
      "eval_samples_per_second": 41.919,
      "eval_steps_per_second": 5.24,
      "eval_wer": 1.0,
      "step": 2500
    },
    {
      "epoch": 3.536067892503536,
      "step": 2500,
      "total_flos": 8.753895488690287e+18,
      "train_loss": 3.32313603515625,
      "train_runtime": 6689.3091,
      "train_samples_per_second": 11.959,
      "train_steps_per_second": 0.374
    }
  ],
  "logging_steps": 500,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 400,
  "total_flos": 8.753895488690287e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}