whisper-tiny-en / trainer_state.json
{
"best_metric": 32.52656434474616,
"best_model_checkpoint": "./whisper-tiny-en/checkpoint-500",
"epoch": 17.857142857142858,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.89,
"grad_norm": 34.12553405761719,
"learning_rate": 4.2000000000000004e-06,
"loss": 3.184,
"step": 25
},
{
"epoch": 1.79,
"grad_norm": 14.376067161560059,
"learning_rate": 9.200000000000002e-06,
"loss": 1.352,
"step": 50
},
{
"epoch": 2.68,
"grad_norm": 11.755908966064453,
"learning_rate": 1e-05,
"loss": 0.4052,
"step": 75
},
{
"epoch": 3.57,
"grad_norm": 7.901417255401611,
"learning_rate": 1e-05,
"loss": 0.2839,
"step": 100
},
{
"epoch": 4.46,
"grad_norm": 5.7967610359191895,
"learning_rate": 1e-05,
"loss": 0.1608,
"step": 125
},
{
"epoch": 5.36,
"grad_norm": 4.712026596069336,
"learning_rate": 1e-05,
"loss": 0.0984,
"step": 150
},
{
"epoch": 6.25,
"grad_norm": 1.4813052415847778,
"learning_rate": 1e-05,
"loss": 0.0546,
"step": 175
},
{
"epoch": 7.14,
"grad_norm": 0.8922443985939026,
"learning_rate": 1e-05,
"loss": 0.0286,
"step": 200
},
{
"epoch": 8.04,
"grad_norm": 2.358762502670288,
"learning_rate": 1e-05,
"loss": 0.0134,
"step": 225
},
{
"epoch": 8.93,
"grad_norm": 0.38798490166664124,
"learning_rate": 1e-05,
"loss": 0.0066,
"step": 250
},
{
"epoch": 9.82,
"grad_norm": 0.184628427028656,
"learning_rate": 1e-05,
"loss": 0.0033,
"step": 275
},
{
"epoch": 10.71,
"grad_norm": 0.10512322932481766,
"learning_rate": 1e-05,
"loss": 0.0021,
"step": 300
},
{
"epoch": 11.61,
"grad_norm": 0.11653382331132889,
"learning_rate": 1e-05,
"loss": 0.002,
"step": 325
},
{
"epoch": 12.5,
"grad_norm": 0.07005073130130768,
"learning_rate": 1e-05,
"loss": 0.0014,
"step": 350
},
{
"epoch": 13.39,
"grad_norm": 0.05834714323282242,
"learning_rate": 1e-05,
"loss": 0.0011,
"step": 375
},
{
"epoch": 14.29,
"grad_norm": 0.04799078404903412,
"learning_rate": 1e-05,
"loss": 0.0009,
"step": 400
},
{
"epoch": 15.18,
"grad_norm": 0.045657627284526825,
"learning_rate": 1e-05,
"loss": 0.0009,
"step": 425
},
{
"epoch": 16.07,
"grad_norm": 0.03895680606365204,
"learning_rate": 1e-05,
"loss": 0.0007,
"step": 450
},
{
"epoch": 16.96,
"grad_norm": 0.039002932608127594,
"learning_rate": 1e-05,
"loss": 0.0007,
"step": 475
},
{
"epoch": 17.86,
"grad_norm": 0.03650466352701187,
"learning_rate": 1e-05,
"loss": 0.0006,
"step": 500
},
{
"epoch": 17.86,
"eval_loss": 0.6624370813369751,
"eval_runtime": 48.1951,
"eval_samples_per_second": 2.345,
"eval_steps_per_second": 0.166,
"eval_wer": 32.52656434474616,
"eval_wer_ortho": 32.510795805058606,
"step": 500
}
],
"logging_steps": 25,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 18,
"save_steps": 500,
"total_flos": 1.9569551781888e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
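
For reference, a minimal sketch (not part of the uploaded files) of how the log_history above can be read back with Python's standard json module; the local filename is an assumption, not something defined by this checkpoint.

import json

# Assumption: the file shown above has been saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; the final entry carries eval metrics instead.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_wer" in e]

print(f"best WER: {state['best_metric']:.2f} at {state['best_model_checkpoint']}")
print(f"final training loss (step {train_log[-1]['step']}): {train_log[-1]['loss']}")
for e in eval_log:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, eval_wer={e['eval_wer']:.2f}")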