{
"best_metric": 0.9502835538752363,
"best_model_checkpoint": "BengaliRegionalASR_finetune1/checkpoint-400",
"epoch": 88.88888888888889,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.2222222222222223,
"grad_norm": 8.39985179901123,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.8375,
"step": 10
},
{
"epoch": 4.444444444444445,
"grad_norm": 4.356942176818848,
"learning_rate": 4.000000000000001e-06,
"loss": 0.7948,
"step": 20
},
{
"epoch": 6.666666666666667,
"grad_norm": 3.8019840717315674,
"learning_rate": 6e-06,
"loss": 0.7282,
"step": 30
},
{
"epoch": 8.88888888888889,
"grad_norm": 11.102900505065918,
"learning_rate": 7.800000000000002e-06,
"loss": 0.6569,
"step": 40
},
{
"epoch": 11.11111111111111,
"grad_norm": 12.99394416809082,
"learning_rate": 9.600000000000001e-06,
"loss": 0.6086,
"step": 50
},
{
"epoch": 11.11111111111111,
"eval_loss": 0.6732638478279114,
"eval_runtime": 20.2721,
"eval_samples_per_second": 4.193,
"eval_steps_per_second": 0.099,
"eval_wer": 1.1232850609756098,
"step": 50
},
{
"epoch": 13.333333333333334,
"grad_norm": 14.49991226196289,
"learning_rate": 9.771428571428571e-06,
"loss": 0.568,
"step": 60
},
{
"epoch": 15.555555555555555,
"grad_norm": 9.160536766052246,
"learning_rate": 9.485714285714287e-06,
"loss": 0.5212,
"step": 70
},
{
"epoch": 17.77777777777778,
"grad_norm": 5.1057634353637695,
"learning_rate": 9.200000000000002e-06,
"loss": 0.4796,
"step": 80
},
{
"epoch": 20.0,
"grad_norm": 8.087906837463379,
"learning_rate": 8.914285714285716e-06,
"loss": 0.4458,
"step": 90
},
{
"epoch": 22.22222222222222,
"grad_norm": 6.61909294128418,
"learning_rate": 8.628571428571429e-06,
"loss": 0.4243,
"step": 100
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.5663390159606934,
"eval_runtime": 19.9974,
"eval_samples_per_second": 4.251,
"eval_steps_per_second": 0.1,
"eval_wer": 1.068579027355623,
"step": 100
},
{
"epoch": 24.444444444444443,
"grad_norm": 4.257968902587891,
"learning_rate": 8.342857142857143e-06,
"loss": 0.4015,
"step": 110
},
{
"epoch": 26.666666666666668,
"grad_norm": 4.2625226974487305,
"learning_rate": 8.057142857142857e-06,
"loss": 0.3832,
"step": 120
},
{
"epoch": 28.88888888888889,
"grad_norm": 4.276031970977783,
"learning_rate": 7.771428571428572e-06,
"loss": 0.3557,
"step": 130
},
{
"epoch": 31.11111111111111,
"grad_norm": 12.266279220581055,
"learning_rate": 7.485714285714286e-06,
"loss": 0.3353,
"step": 140
},
{
"epoch": 33.333333333333336,
"grad_norm": 9.257742881774902,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.3123,
"step": 150
},
{
"epoch": 33.333333333333336,
"eval_loss": 0.5672491788864136,
"eval_runtime": 20.5784,
"eval_samples_per_second": 4.131,
"eval_steps_per_second": 0.097,
"eval_wer": 0.9937488160636484,
"step": 150
},
{
"epoch": 35.55555555555556,
"grad_norm": 4.698246479034424,
"learning_rate": 6.914285714285715e-06,
"loss": 0.2906,
"step": 160
},
{
"epoch": 37.77777777777778,
"grad_norm": 4.475348472595215,
"learning_rate": 6.628571428571428e-06,
"loss": 0.2597,
"step": 170
},
{
"epoch": 40.0,
"grad_norm": 2.7412610054016113,
"learning_rate": 6.342857142857143e-06,
"loss": 0.2397,
"step": 180
},
{
"epoch": 42.22222222222222,
"grad_norm": 2.8001537322998047,
"learning_rate": 6.057142857142858e-06,
"loss": 0.2183,
"step": 190
},
{
"epoch": 44.44444444444444,
"grad_norm": 2.7077558040618896,
"learning_rate": 5.771428571428572e-06,
"loss": 0.2013,
"step": 200
},
{
"epoch": 44.44444444444444,
"eval_loss": 0.5929003953933716,
"eval_runtime": 20.3009,
"eval_samples_per_second": 4.187,
"eval_steps_per_second": 0.099,
"eval_wer": 0.9850046860356139,
"step": 200
},
{
"epoch": 46.666666666666664,
"grad_norm": 2.755575656890869,
"learning_rate": 5.485714285714287e-06,
"loss": 0.1848,
"step": 210
},
{
"epoch": 48.888888888888886,
"grad_norm": 3.3562865257263184,
"learning_rate": 5.2e-06,
"loss": 0.1697,
"step": 220
},
{
"epoch": 51.111111111111114,
"grad_norm": 6.205230236053467,
"learning_rate": 4.9142857142857145e-06,
"loss": 0.1577,
"step": 230
},
{
"epoch": 53.333333333333336,
"grad_norm": 4.571589946746826,
"learning_rate": 4.628571428571429e-06,
"loss": 0.148,
"step": 240
},
{
"epoch": 55.55555555555556,
"grad_norm": 4.679986476898193,
"learning_rate": 4.342857142857143e-06,
"loss": 0.1369,
"step": 250
},
{
"epoch": 55.55555555555556,
"eval_loss": 0.6405652761459351,
"eval_runtime": 20.0641,
"eval_samples_per_second": 4.236,
"eval_steps_per_second": 0.1,
"eval_wer": 0.9656160458452722,
"step": 250
},
{
"epoch": 57.77777777777778,
"grad_norm": 3.558831214904785,
"learning_rate": 4.057142857142858e-06,
"loss": 0.1251,
"step": 260
},
{
"epoch": 60.0,
"grad_norm": 2.806180000305176,
"learning_rate": 3.771428571428572e-06,
"loss": 0.1162,
"step": 270
},
{
"epoch": 62.22222222222222,
"grad_norm": 3.5792713165283203,
"learning_rate": 3.4857142857142863e-06,
"loss": 0.1119,
"step": 280
},
{
"epoch": 64.44444444444444,
"grad_norm": 3.1600425243377686,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.1079,
"step": 290
},
{
"epoch": 66.66666666666667,
"grad_norm": 2.9095730781555176,
"learning_rate": 2.9142857142857146e-06,
"loss": 0.1047,
"step": 300
},
{
"epoch": 66.66666666666667,
"eval_loss": 0.7073546648025513,
"eval_runtime": 22.6057,
"eval_samples_per_second": 3.76,
"eval_steps_per_second": 0.088,
"eval_wer": 0.972833365219055,
"step": 300
},
{
"epoch": 68.88888888888889,
"grad_norm": 3.2166309356689453,
"learning_rate": 2.6285714285714286e-06,
"loss": 0.1119,
"step": 310
},
{
"epoch": 71.11111111111111,
"grad_norm": 4.965418815612793,
"learning_rate": 2.342857142857143e-06,
"loss": 0.1101,
"step": 320
},
{
"epoch": 73.33333333333333,
"grad_norm": 2.6848812103271484,
"learning_rate": 2.0571428571428573e-06,
"loss": 0.0907,
"step": 330
},
{
"epoch": 75.55555555555556,
"grad_norm": 2.8126628398895264,
"learning_rate": 1.7714285714285714e-06,
"loss": 0.0854,
"step": 340
},
{
"epoch": 77.77777777777777,
"grad_norm": 2.887021780014038,
"learning_rate": 1.4857142857142858e-06,
"loss": 0.0819,
"step": 350
},
{
"epoch": 77.77777777777777,
"eval_loss": 0.6702935099601746,
"eval_runtime": 20.1033,
"eval_samples_per_second": 4.228,
"eval_steps_per_second": 0.099,
"eval_wer": 0.9627518053971874,
"step": 350
},
{
"epoch": 80.0,
"grad_norm": 2.5661635398864746,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.0825,
"step": 360
},
{
"epoch": 82.22222222222223,
"grad_norm": 2.133279800415039,
"learning_rate": 9.142857142857144e-07,
"loss": 0.0783,
"step": 370
},
{
"epoch": 84.44444444444444,
"grad_norm": 1.9323846101760864,
"learning_rate": 6.285714285714287e-07,
"loss": 0.0767,
"step": 380
},
{
"epoch": 86.66666666666667,
"grad_norm": 1.892685890197754,
"learning_rate": 3.428571428571429e-07,
"loss": 0.0758,
"step": 390
},
{
"epoch": 88.88888888888889,
"grad_norm": 1.9149274826049805,
"learning_rate": 5.714285714285715e-08,
"loss": 0.0728,
"step": 400
},
{
"epoch": 88.88888888888889,
"eval_loss": 0.6913564801216125,
"eval_runtime": 20.0702,
"eval_samples_per_second": 4.235,
"eval_steps_per_second": 0.1,
"eval_wer": 0.9502835538752363,
"step": 400
},
{
"epoch": 88.88888888888889,
"step": 400,
"total_flos": 1.846946575637545e+19,
"train_loss": 0.28228739023208615,
"train_runtime": 3388.702,
"train_samples_per_second": 20.834,
"train_steps_per_second": 0.118
}
],
"logging_steps": 10,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.846946575637545e+19,
"train_batch_size": 40,
"trial_name": null,
"trial_params": null
}