{
  "best_metric": 26.972262247838618,
  "best_model_checkpoint": "./whisper-medium-hy-large-data\\checkpoint-3090",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 3090,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9993527508090615,
      "grad_norm": 2.3409805297851562,
      "learning_rate": 9.60099492175355e-06,
      "loss": 0.0122,
      "step": 386
    },
    {
      "epoch": 0.9993527508090615,
      "eval_loss": 0.19175899028778076,
      "eval_runtime": 42548.1103,
      "eval_samples_per_second": 0.101,
      "eval_steps_per_second": 0.05,
      "eval_wer": 28.368155619596543,
      "step": 386
    },
    {
      "epoch": 1.998705501618123,
      "grad_norm": 1.1300331354141235,
      "learning_rate": 9.200953466680485e-06,
      "loss": 0.0095,
      "step": 772
    },
    {
      "epoch": 1.998705501618123,
      "eval_loss": 0.19632793962955475,
      "eval_runtime": 45633.7169,
      "eval_samples_per_second": 0.094,
      "eval_steps_per_second": 0.047,
      "eval_wer": 30.382444764649374,
      "step": 772
    },
    {
      "epoch": 2.9980582524271844,
      "grad_norm": 0.9480321407318115,
      "learning_rate": 8.800912011607421e-06,
      "loss": 0.0076,
      "step": 1158
    },
    {
      "epoch": 2.9980582524271844,
      "eval_loss": 0.19936133921146393,
      "eval_runtime": 45880.7942,
      "eval_samples_per_second": 0.093,
      "eval_steps_per_second": 0.047,
      "eval_wer": 27.785782901056677,
      "step": 1158
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.70139217376709,
      "learning_rate": 8.399834179707742e-06,
      "loss": 0.0057,
      "step": 1545
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.20888063311576843,
      "eval_runtime": 32746.3235,
      "eval_samples_per_second": 0.131,
      "eval_steps_per_second": 0.065,
      "eval_wer": 27.668707973102784,
      "step": 1545
    },
    {
      "epoch": 4.999352750809061,
      "grad_norm": 1.9372472763061523,
      "learning_rate": 7.999792724634678e-06,
      "loss": 0.0046,
      "step": 1931
    },
    {
      "epoch": 4.999352750809061,
      "eval_loss": 0.2178862988948822,
      "eval_runtime": 46002.5276,
      "eval_samples_per_second": 0.093,
      "eval_steps_per_second": 0.047,
      "eval_wer": 28.542267050912585,
      "step": 1931
    },
    {
      "epoch": 5.998705501618123,
      "grad_norm": 1.5416231155395508,
      "learning_rate": 7.599751269561612e-06,
      "loss": 0.0036,
      "step": 2317
    },
    {
      "epoch": 5.998705501618123,
      "eval_loss": 0.21689267456531525,
      "eval_runtime": 45863.2972,
      "eval_samples_per_second": 0.093,
      "eval_steps_per_second": 0.047,
      "eval_wer": 27.554634966378483,
      "step": 2317
    },
    {
      "epoch": 6.998058252427184,
      "grad_norm": 0.7119281888008118,
      "learning_rate": 7.199709814488549e-06,
      "loss": 0.0028,
      "step": 2703
    },
    {
      "epoch": 6.998058252427184,
      "eval_loss": 0.21707293391227722,
      "eval_runtime": 45784.2835,
      "eval_samples_per_second": 0.094,
      "eval_steps_per_second": 0.047,
      "eval_wer": 27.251440922190202,
      "step": 2703
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.8986366391181946,
      "learning_rate": 6.798631982588871e-06,
      "loss": 0.0028,
      "step": 3090
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.2156277447938919,
      "eval_runtime": 32539.3621,
      "eval_samples_per_second": 0.132,
      "eval_steps_per_second": 0.066,
      "eval_wer": 26.972262247838618,
      "step": 3090
    }
  ],
  "logging_steps": 500,
  "max_steps": 9650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 500,
  "total_flos": 5.04587066277888e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}