{
"best_metric": 43.17606786724215,
"best_model_checkpoint": "/media/makhataei/Backups/Whisper-Small-Common-Voice/checkpoint-300",
"epoch": 1.5701668302257115,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"learning_rate": 5e-06,
"loss": 0.3008,
"step": 25
},
{
"epoch": 0.2,
"learning_rate": 1e-05,
"loss": 0.2368,
"step": 50
},
{
"epoch": 0.29,
"learning_rate": 9.974874371859297e-06,
"loss": 0.2001,
"step": 75
},
{
"epoch": 0.39,
"learning_rate": 9.949748743718594e-06,
"loss": 0.1801,
"step": 100
},
{
"epoch": 0.39,
"eval_loss": 0.4975946843624115,
"eval_runtime": 1546.7344,
"eval_samples_per_second": 5.59,
"eval_steps_per_second": 0.559,
"eval_wer": 49.12602735195383,
"step": 100
},
{
"epoch": 0.49,
"learning_rate": 9.92462311557789e-06,
"loss": 0.1713,
"step": 125
},
{
"epoch": 0.59,
"learning_rate": 9.899497487437186e-06,
"loss": 0.1657,
"step": 150
},
{
"epoch": 0.69,
"learning_rate": 9.874371859296484e-06,
"loss": 0.1574,
"step": 175
},
{
"epoch": 0.79,
"learning_rate": 9.84924623115578e-06,
"loss": 0.1597,
"step": 200
},
{
"epoch": 0.79,
"eval_loss": 0.46244511008262634,
"eval_runtime": 1507.9912,
"eval_samples_per_second": 5.733,
"eval_steps_per_second": 0.574,
"eval_wer": 46.749681665591766,
"step": 200
},
{
"epoch": 0.88,
"learning_rate": 9.824120603015075e-06,
"loss": 0.1509,
"step": 225
},
{
"epoch": 0.98,
"learning_rate": 9.798994974874372e-06,
"loss": 0.1434,
"step": 250
},
{
"epoch": 1.08,
"learning_rate": 9.773869346733669e-06,
"loss": 0.0919,
"step": 275
},
{
"epoch": 1.18,
"learning_rate": 9.748743718592965e-06,
"loss": 0.0776,
"step": 300
},
{
"epoch": 1.18,
"eval_loss": 0.4794461131095886,
"eval_runtime": 1499.4378,
"eval_samples_per_second": 5.766,
"eval_steps_per_second": 0.577,
"eval_wer": 43.17606786724215,
"step": 300
},
{
"epoch": 1.28,
"learning_rate": 9.723618090452262e-06,
"loss": 0.0772,
"step": 325
},
{
"epoch": 1.37,
"learning_rate": 9.698492462311559e-06,
"loss": 0.0799,
"step": 350
},
{
"epoch": 1.47,
"learning_rate": 9.673366834170855e-06,
"loss": 0.0872,
"step": 375
},
{
"epoch": 1.57,
"learning_rate": 9.648241206030152e-06,
"loss": 0.083,
"step": 400
},
{
"epoch": 1.57,
"eval_loss": 0.48234912753105164,
"eval_runtime": 1500.1085,
"eval_samples_per_second": 5.764,
"eval_steps_per_second": 0.577,
"eval_wer": 43.802814572274315,
"step": 400
}
],
"logging_steps": 25,
"max_steps": 10000,
"num_train_epochs": 40,
"save_steps": 100,
"total_flos": 6.46171569856512e+18,
"trial_name": null,
"trial_params": null
}