{
"best_metric": 21.531825600373047,
"best_model_checkpoint": "./whisper-small-clean_6-v5/checkpoint-1000",
"epoch": 1.8903591682419658,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04725897920604915,
"grad_norm": 4.044429302215576,
"learning_rate": 4.4e-06,
"loss": 0.6829,
"step": 25
},
{
"epoch": 0.0945179584120983,
"grad_norm": 4.314411640167236,
"learning_rate": 9.4e-06,
"loss": 0.2473,
"step": 50
},
{
"epoch": 0.14177693761814744,
"grad_norm": 3.5965349674224854,
"learning_rate": 9.848275862068966e-06,
"loss": 0.201,
"step": 75
},
{
"epoch": 0.1890359168241966,
"grad_norm": 3.4847946166992188,
"learning_rate": 9.675862068965518e-06,
"loss": 0.1812,
"step": 100
},
{
"epoch": 0.23629489603024575,
"grad_norm": 3.8979456424713135,
"learning_rate": 9.503448275862069e-06,
"loss": 0.1815,
"step": 125
},
{
"epoch": 0.2835538752362949,
"grad_norm": 4.511611461639404,
"learning_rate": 9.331034482758623e-06,
"loss": 0.1789,
"step": 150
},
{
"epoch": 0.33081285444234404,
"grad_norm": 4.727020263671875,
"learning_rate": 9.158620689655173e-06,
"loss": 0.1709,
"step": 175
},
{
"epoch": 0.3780718336483932,
"grad_norm": 3.503211259841919,
"learning_rate": 8.986206896551725e-06,
"loss": 0.1761,
"step": 200
},
{
"epoch": 0.42533081285444235,
"grad_norm": 3.376284599304199,
"learning_rate": 8.813793103448277e-06,
"loss": 0.1673,
"step": 225
},
{
"epoch": 0.4725897920604915,
"grad_norm": 4.138789176940918,
"learning_rate": 8.641379310344828e-06,
"loss": 0.172,
"step": 250
},
{
"epoch": 0.5198487712665406,
"grad_norm": 3.522365093231201,
"learning_rate": 8.46896551724138e-06,
"loss": 0.1627,
"step": 275
},
{
"epoch": 0.5671077504725898,
"grad_norm": 2.7766571044921875,
"learning_rate": 8.296551724137932e-06,
"loss": 0.1605,
"step": 300
},
{
"epoch": 0.6143667296786389,
"grad_norm": 3.5490145683288574,
"learning_rate": 8.124137931034484e-06,
"loss": 0.1574,
"step": 325
},
{
"epoch": 0.6616257088846881,
"grad_norm": 3.641836643218994,
"learning_rate": 7.951724137931034e-06,
"loss": 0.1637,
"step": 350
},
{
"epoch": 0.7088846880907372,
"grad_norm": 3.28399920463562,
"learning_rate": 7.779310344827586e-06,
"loss": 0.156,
"step": 375
},
{
"epoch": 0.7561436672967864,
"grad_norm": 3.2820281982421875,
"learning_rate": 7.6068965517241385e-06,
"loss": 0.1578,
"step": 400
},
{
"epoch": 0.8034026465028355,
"grad_norm": 3.589008092880249,
"learning_rate": 7.4344827586206906e-06,
"loss": 0.1573,
"step": 425
},
{
"epoch": 0.8506616257088847,
"grad_norm": 3.9363508224487305,
"learning_rate": 7.262068965517242e-06,
"loss": 0.1527,
"step": 450
},
{
"epoch": 0.8979206049149339,
"grad_norm": 3.573969841003418,
"learning_rate": 7.089655172413794e-06,
"loss": 0.1561,
"step": 475
},
{
"epoch": 0.945179584120983,
"grad_norm": 3.35899019241333,
"learning_rate": 6.917241379310345e-06,
"loss": 0.1461,
"step": 500
},
{
"epoch": 0.945179584120983,
"eval_loss": 0.27461186051368713,
"eval_runtime": 652.5217,
"eval_samples_per_second": 2.337,
"eval_steps_per_second": 0.074,
"eval_wer": 23.047330380041966,
"step": 500
},
{
"epoch": 0.9924385633270322,
"grad_norm": 2.7852671146392822,
"learning_rate": 6.744827586206897e-06,
"loss": 0.1428,
"step": 525
},
{
"epoch": 1.0396975425330812,
"grad_norm": 1.9958089590072632,
"learning_rate": 6.572413793103449e-06,
"loss": 0.0829,
"step": 550
},
{
"epoch": 1.0869565217391304,
"grad_norm": 2.1867661476135254,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.0698,
"step": 575
},
{
"epoch": 1.1342155009451795,
"grad_norm": 1.7803280353546143,
"learning_rate": 6.227586206896552e-06,
"loss": 0.0717,
"step": 600
},
{
"epoch": 1.1814744801512287,
"grad_norm": 1.8491047620773315,
"learning_rate": 6.055172413793105e-06,
"loss": 0.0734,
"step": 625
},
{
"epoch": 1.2287334593572778,
"grad_norm": 2.021000862121582,
"learning_rate": 5.882758620689656e-06,
"loss": 0.0673,
"step": 650
},
{
"epoch": 1.275992438563327,
"grad_norm": 2.592538833618164,
"learning_rate": 5.710344827586207e-06,
"loss": 0.0713,
"step": 675
},
{
"epoch": 1.3232514177693762,
"grad_norm": 4.53612756729126,
"learning_rate": 5.5379310344827585e-06,
"loss": 0.0756,
"step": 700
},
{
"epoch": 1.3705103969754253,
"grad_norm": 2.295854091644287,
"learning_rate": 5.365517241379311e-06,
"loss": 0.069,
"step": 725
},
{
"epoch": 1.4177693761814745,
"grad_norm": 2.696308135986328,
"learning_rate": 5.193103448275863e-06,
"loss": 0.0724,
"step": 750
},
{
"epoch": 1.4650283553875236,
"grad_norm": 2.185211420059204,
"learning_rate": 5.020689655172414e-06,
"loss": 0.071,
"step": 775
},
{
"epoch": 1.5122873345935728,
"grad_norm": 2.22100830078125,
"learning_rate": 4.848275862068966e-06,
"loss": 0.0722,
"step": 800
},
{
"epoch": 1.559546313799622,
"grad_norm": 2.4104700088500977,
"learning_rate": 4.675862068965517e-06,
"loss": 0.0693,
"step": 825
},
{
"epoch": 1.606805293005671,
"grad_norm": 2.2482173442840576,
"learning_rate": 4.503448275862069e-06,
"loss": 0.0662,
"step": 850
},
{
"epoch": 1.6540642722117203,
"grad_norm": 1.6513797044754028,
"learning_rate": 4.3310344827586206e-06,
"loss": 0.075,
"step": 875
},
{
"epoch": 1.7013232514177694,
"grad_norm": 1.8004822731018066,
"learning_rate": 4.158620689655173e-06,
"loss": 0.0651,
"step": 900
},
{
"epoch": 1.7485822306238186,
"grad_norm": 2.4322433471679688,
"learning_rate": 3.986206896551724e-06,
"loss": 0.0729,
"step": 925
},
{
"epoch": 1.7958412098298677,
"grad_norm": 1.786984920501709,
"learning_rate": 3.813793103448276e-06,
"loss": 0.0698,
"step": 950
},
{
"epoch": 1.8431001890359169,
"grad_norm": 2.466444253921509,
"learning_rate": 3.641379310344828e-06,
"loss": 0.0737,
"step": 975
},
{
"epoch": 1.8903591682419658,
"grad_norm": 2.1226634979248047,
"learning_rate": 3.4689655172413793e-06,
"loss": 0.0631,
"step": 1000
},
{
"epoch": 1.8903591682419658,
"eval_loss": 0.2692776620388031,
"eval_runtime": 652.5262,
"eval_samples_per_second": 2.337,
"eval_steps_per_second": 0.074,
"eval_wer": 21.531825600373047,
"step": 1000
}
],
"logging_steps": 25,
"max_steps": 1500,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.845243908186112e+19,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
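Note: the above follows the standard Hugging Face Trainer trainer_state.json layout: "log_history" interleaves training entries (carrying "loss", "grad_norm", "learning_rate", logged every 25 steps per "logging_steps") with evaluation entries (carrying "eval_loss", "eval_wer", logged every 500 steps per "eval_steps"). A minimal sketch of pulling those curves out of this file, assuming it is read from the checkpoint-1000 directory named in "best_model_checkpoint" (the path is illustrative, not part of the file itself):

import json

# Illustrative path, taken from "best_model_checkpoint" above.
with open("whisper-small-clean_6-v5/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_wer".
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_wer = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

print("best WER:", state["best_metric"])   # 21.531825600373047, reached at step 1000
print("eval points:", eval_wer)            # [(500, 23.047...), (1000, 21.531...)]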