{
"best_metric": 71.37643973087012,
"best_model_checkpoint": "./whisper-small-finetune_levantine/checkpoint-4000",
"epoch": 3.5087719298245617,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 24.49530792236328,
"learning_rate": 5.000000000000001e-07,
"loss": 1.6584,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 26.777080535888672,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.5114,
"step": 50
},
{
"epoch": 0.05,
"grad_norm": 20.427642822265625,
"learning_rate": 1.5e-06,
"loss": 1.4767,
"step": 75
},
{
"epoch": 0.07,
"grad_norm": 21.08566665649414,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.411,
"step": 100
},
{
"epoch": 0.09,
"grad_norm": 19.165359497070312,
"learning_rate": 2.5e-06,
"loss": 1.415,
"step": 125
},
{
"epoch": 0.11,
"grad_norm": 21.769067764282227,
"learning_rate": 3e-06,
"loss": 1.2559,
"step": 150
},
{
"epoch": 0.12,
"grad_norm": 24.020814895629883,
"learning_rate": 3.5e-06,
"loss": 1.3334,
"step": 175
},
{
"epoch": 0.14,
"grad_norm": 18.758386611938477,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1788,
"step": 200
},
{
"epoch": 0.16,
"grad_norm": 24.74569320678711,
"learning_rate": 4.5e-06,
"loss": 1.3336,
"step": 225
},
{
"epoch": 0.18,
"grad_norm": 20.71479034423828,
"learning_rate": 5e-06,
"loss": 1.266,
"step": 250
},
{
"epoch": 0.19,
"grad_norm": 22.093847274780273,
"learning_rate": 5.500000000000001e-06,
"loss": 1.2522,
"step": 275
},
{
"epoch": 0.21,
"grad_norm": 20.75282859802246,
"learning_rate": 6e-06,
"loss": 1.364,
"step": 300
},
{
"epoch": 0.23,
"grad_norm": 19.298858642578125,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.1445,
"step": 325
},
{
"epoch": 0.25,
"grad_norm": 19.872005462646484,
"learning_rate": 7e-06,
"loss": 1.2237,
"step": 350
},
{
"epoch": 0.26,
"grad_norm": 24.68609619140625,
"learning_rate": 7.500000000000001e-06,
"loss": 1.1531,
"step": 375
},
{
"epoch": 0.28,
"grad_norm": 19.604598999023438,
"learning_rate": 8.000000000000001e-06,
"loss": 1.265,
"step": 400
},
{
"epoch": 0.3,
"grad_norm": 20.639904022216797,
"learning_rate": 8.5e-06,
"loss": 1.2187,
"step": 425
},
{
"epoch": 0.32,
"grad_norm": 20.088167190551758,
"learning_rate": 9e-06,
"loss": 1.2555,
"step": 450
},
{
"epoch": 0.33,
"grad_norm": 18.40792465209961,
"learning_rate": 9.5e-06,
"loss": 1.2229,
"step": 475
},
{
"epoch": 0.35,
"grad_norm": 18.657146453857422,
"learning_rate": 1e-05,
"loss": 1.2368,
"step": 500
},
{
"epoch": 0.37,
"grad_norm": 18.283592224121094,
"learning_rate": 9.944444444444445e-06,
"loss": 1.2768,
"step": 525
},
{
"epoch": 0.39,
"grad_norm": 20.509708404541016,
"learning_rate": 9.88888888888889e-06,
"loss": 1.2746,
"step": 550
},
{
"epoch": 0.4,
"grad_norm": 29.214723587036133,
"learning_rate": 9.833333333333333e-06,
"loss": 1.2714,
"step": 575
},
{
"epoch": 0.42,
"grad_norm": 22.353967666625977,
"learning_rate": 9.777777777777779e-06,
"loss": 1.2722,
"step": 600
},
{
"epoch": 0.44,
"grad_norm": 18.456220626831055,
"learning_rate": 9.722222222222223e-06,
"loss": 1.1404,
"step": 625
},
{
"epoch": 0.46,
"grad_norm": 13.63076114654541,
"learning_rate": 9.666666666666667e-06,
"loss": 1.1851,
"step": 650
},
{
"epoch": 0.47,
"grad_norm": 18.397729873657227,
"learning_rate": 9.611111111111112e-06,
"loss": 1.2517,
"step": 675
},
{
"epoch": 0.49,
"grad_norm": 21.700220108032227,
"learning_rate": 9.555555555555556e-06,
"loss": 1.1377,
"step": 700
},
{
"epoch": 0.51,
"grad_norm": 21.407974243164062,
"learning_rate": 9.5e-06,
"loss": 1.167,
"step": 725
},
{
"epoch": 0.53,
"grad_norm": 23.072011947631836,
"learning_rate": 9.444444444444445e-06,
"loss": 1.1425,
"step": 750
},
{
"epoch": 0.54,
"grad_norm": 18.679073333740234,
"learning_rate": 9.38888888888889e-06,
"loss": 1.3557,
"step": 775
},
{
"epoch": 0.56,
"grad_norm": 17.260936737060547,
"learning_rate": 9.333333333333334e-06,
"loss": 1.1378,
"step": 800
},
{
"epoch": 0.58,
"grad_norm": 21.285743713378906,
"learning_rate": 9.277777777777778e-06,
"loss": 1.1549,
"step": 825
},
{
"epoch": 0.6,
"grad_norm": 15.386796951293945,
"learning_rate": 9.222222222222224e-06,
"loss": 1.1067,
"step": 850
},
{
"epoch": 0.61,
"grad_norm": 16.645769119262695,
"learning_rate": 9.166666666666666e-06,
"loss": 1.0842,
"step": 875
},
{
"epoch": 0.63,
"grad_norm": 20.210805892944336,
"learning_rate": 9.111111111111112e-06,
"loss": 1.2224,
"step": 900
},
{
"epoch": 0.65,
"grad_norm": 16.632762908935547,
"learning_rate": 9.055555555555556e-06,
"loss": 1.1574,
"step": 925
},
{
"epoch": 0.67,
"grad_norm": 18.083099365234375,
"learning_rate": 9e-06,
"loss": 1.0843,
"step": 950
},
{
"epoch": 0.68,
"grad_norm": 17.896181106567383,
"learning_rate": 8.944444444444446e-06,
"loss": 1.1495,
"step": 975
},
{
"epoch": 0.7,
"grad_norm": 17.45163917541504,
"learning_rate": 8.888888888888888e-06,
"loss": 1.1664,
"step": 1000
},
{
"epoch": 0.7,
"eval_cer": 49.3922912710928,
"eval_loss": 1.1463403701782227,
"eval_runtime": 691.8556,
"eval_samples_per_second": 4.119,
"eval_steps_per_second": 0.516,
"eval_wer": 74.55810240620367,
"step": 1000
},
{
"epoch": 0.72,
"grad_norm": 15.581072807312012,
"learning_rate": 8.833333333333334e-06,
"loss": 1.103,
"step": 1025
},
{
"epoch": 0.74,
"grad_norm": 24.887859344482422,
"learning_rate": 8.777777777777778e-06,
"loss": 1.1038,
"step": 1050
},
{
"epoch": 0.75,
"grad_norm": 18.880041122436523,
"learning_rate": 8.722222222222224e-06,
"loss": 1.1027,
"step": 1075
},
{
"epoch": 0.77,
"grad_norm": 15.563136100769043,
"learning_rate": 8.666666666666668e-06,
"loss": 1.1111,
"step": 1100
},
{
"epoch": 0.79,
"grad_norm": 15.4989595413208,
"learning_rate": 8.611111111111112e-06,
"loss": 1.1109,
"step": 1125
},
{
"epoch": 0.81,
"grad_norm": 12.751100540161133,
"learning_rate": 8.555555555555556e-06,
"loss": 1.1971,
"step": 1150
},
{
"epoch": 0.82,
"grad_norm": 14.376218795776367,
"learning_rate": 8.5e-06,
"loss": 1.1702,
"step": 1175
},
{
"epoch": 0.84,
"grad_norm": 23.62881851196289,
"learning_rate": 8.444444444444446e-06,
"loss": 1.162,
"step": 1200
},
{
"epoch": 0.86,
"grad_norm": 18.038660049438477,
"learning_rate": 8.38888888888889e-06,
"loss": 1.1106,
"step": 1225
},
{
"epoch": 0.88,
"grad_norm": 18.63202667236328,
"learning_rate": 8.333333333333334e-06,
"loss": 1.0821,
"step": 1250
},
{
"epoch": 0.89,
"grad_norm": 21.305559158325195,
"learning_rate": 8.277777777777778e-06,
"loss": 1.1619,
"step": 1275
},
{
"epoch": 0.91,
"grad_norm": 23.507896423339844,
"learning_rate": 8.222222222222222e-06,
"loss": 1.1462,
"step": 1300
},
{
"epoch": 0.93,
"grad_norm": 24.86905288696289,
"learning_rate": 8.166666666666668e-06,
"loss": 1.2142,
"step": 1325
},
{
"epoch": 0.95,
"grad_norm": 19.211929321289062,
"learning_rate": 8.111111111111112e-06,
"loss": 1.153,
"step": 1350
},
{
"epoch": 0.96,
"grad_norm": 16.071701049804688,
"learning_rate": 8.055555555555557e-06,
"loss": 1.1604,
"step": 1375
},
{
"epoch": 0.98,
"grad_norm": 20.383394241333008,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2233,
"step": 1400
},
{
"epoch": 1.0,
"grad_norm": 25.448518753051758,
"learning_rate": 7.944444444444445e-06,
"loss": 1.0069,
"step": 1425
},
{
"epoch": 1.02,
"grad_norm": 11.47100830078125,
"learning_rate": 7.88888888888889e-06,
"loss": 0.8217,
"step": 1450
},
{
"epoch": 1.04,
"grad_norm": 12.604474067687988,
"learning_rate": 7.833333333333333e-06,
"loss": 0.8894,
"step": 1475
},
{
"epoch": 1.05,
"grad_norm": 11.27602481842041,
"learning_rate": 7.77777777777778e-06,
"loss": 0.8474,
"step": 1500
},
{
"epoch": 1.07,
"grad_norm": 17.379961013793945,
"learning_rate": 7.722222222222223e-06,
"loss": 0.8747,
"step": 1525
},
{
"epoch": 1.09,
"grad_norm": 15.369179725646973,
"learning_rate": 7.666666666666667e-06,
"loss": 0.826,
"step": 1550
},
{
"epoch": 1.11,
"grad_norm": 14.735352516174316,
"learning_rate": 7.611111111111111e-06,
"loss": 0.8508,
"step": 1575
},
{
"epoch": 1.12,
"grad_norm": 17.533004760742188,
"learning_rate": 7.555555555555556e-06,
"loss": 0.8139,
"step": 1600
},
{
"epoch": 1.14,
"grad_norm": 18.504226684570312,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7383,
"step": 1625
},
{
"epoch": 1.16,
"grad_norm": 14.30873966217041,
"learning_rate": 7.444444444444445e-06,
"loss": 0.8406,
"step": 1650
},
{
"epoch": 1.18,
"grad_norm": 16.69165802001953,
"learning_rate": 7.38888888888889e-06,
"loss": 0.8421,
"step": 1675
},
{
"epoch": 1.19,
"grad_norm": 19.381628036499023,
"learning_rate": 7.333333333333333e-06,
"loss": 0.8858,
"step": 1700
},
{
"epoch": 1.21,
"grad_norm": 13.489551544189453,
"learning_rate": 7.277777777777778e-06,
"loss": 0.9216,
"step": 1725
},
{
"epoch": 1.23,
"grad_norm": 12.954667091369629,
"learning_rate": 7.222222222222223e-06,
"loss": 0.8289,
"step": 1750
},
{
"epoch": 1.25,
"grad_norm": 16.877836227416992,
"learning_rate": 7.166666666666667e-06,
"loss": 0.9608,
"step": 1775
},
{
"epoch": 1.26,
"grad_norm": 22.067745208740234,
"learning_rate": 7.111111111111112e-06,
"loss": 0.8079,
"step": 1800
},
{
"epoch": 1.28,
"grad_norm": 14.56628131866455,
"learning_rate": 7.055555555555557e-06,
"loss": 0.8918,
"step": 1825
},
{
"epoch": 1.3,
"grad_norm": 18.21314811706543,
"learning_rate": 7e-06,
"loss": 0.8302,
"step": 1850
},
{
"epoch": 1.32,
"grad_norm": 18.223928451538086,
"learning_rate": 6.944444444444445e-06,
"loss": 0.8743,
"step": 1875
},
{
"epoch": 1.33,
"grad_norm": 9.887601852416992,
"learning_rate": 6.88888888888889e-06,
"loss": 0.8838,
"step": 1900
},
{
"epoch": 1.35,
"grad_norm": 22.714948654174805,
"learning_rate": 6.833333333333334e-06,
"loss": 0.8611,
"step": 1925
},
{
"epoch": 1.37,
"grad_norm": 17.669092178344727,
"learning_rate": 6.777777777777779e-06,
"loss": 0.8815,
"step": 1950
},
{
"epoch": 1.39,
"grad_norm": 17.025781631469727,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.8293,
"step": 1975
},
{
"epoch": 1.4,
"grad_norm": 15.673608779907227,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8629,
"step": 2000
},
{
"epoch": 1.4,
"eval_cer": 52.48608116371126,
"eval_loss": 1.1019113063812256,
"eval_runtime": 711.271,
"eval_samples_per_second": 4.007,
"eval_steps_per_second": 0.502,
"eval_wer": 77.20948796898163,
"step": 2000
},
{
"epoch": 1.42,
"grad_norm": 18.508005142211914,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.8398,
"step": 2025
},
{
"epoch": 1.44,
"grad_norm": 18.747060775756836,
"learning_rate": 6.555555555555556e-06,
"loss": 0.9232,
"step": 2050
},
{
"epoch": 1.46,
"grad_norm": 20.83848762512207,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.8977,
"step": 2075
},
{
"epoch": 1.47,
"grad_norm": 19.12614631652832,
"learning_rate": 6.444444444444445e-06,
"loss": 0.84,
"step": 2100
},
{
"epoch": 1.49,
"grad_norm": 15.908050537109375,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.9378,
"step": 2125
},
{
"epoch": 1.51,
"grad_norm": 12.05090045928955,
"learning_rate": 6.333333333333333e-06,
"loss": 0.7247,
"step": 2150
},
{
"epoch": 1.53,
"grad_norm": 16.157573699951172,
"learning_rate": 6.277777777777778e-06,
"loss": 0.8945,
"step": 2175
},
{
"epoch": 1.54,
"grad_norm": 20.49693489074707,
"learning_rate": 6.222222222222223e-06,
"loss": 0.8605,
"step": 2200
},
{
"epoch": 1.56,
"grad_norm": 15.67389965057373,
"learning_rate": 6.166666666666667e-06,
"loss": 0.8405,
"step": 2225
},
{
"epoch": 1.58,
"grad_norm": 14.528761863708496,
"learning_rate": 6.111111111111112e-06,
"loss": 0.8315,
"step": 2250
},
{
"epoch": 1.6,
"grad_norm": 21.247663497924805,
"learning_rate": 6.055555555555555e-06,
"loss": 0.8511,
"step": 2275
},
{
"epoch": 1.61,
"grad_norm": 19.025577545166016,
"learning_rate": 6e-06,
"loss": 0.8066,
"step": 2300
},
{
"epoch": 1.63,
"grad_norm": 20.215118408203125,
"learning_rate": 5.944444444444445e-06,
"loss": 0.8249,
"step": 2325
},
{
"epoch": 1.65,
"grad_norm": 14.20770263671875,
"learning_rate": 5.88888888888889e-06,
"loss": 0.8892,
"step": 2350
},
{
"epoch": 1.67,
"grad_norm": 12.155647277832031,
"learning_rate": 5.833333333333334e-06,
"loss": 0.8731,
"step": 2375
},
{
"epoch": 1.68,
"grad_norm": 16.259695053100586,
"learning_rate": 5.777777777777778e-06,
"loss": 0.7707,
"step": 2400
},
{
"epoch": 1.7,
"grad_norm": 18.88398551940918,
"learning_rate": 5.722222222222222e-06,
"loss": 0.8606,
"step": 2425
},
{
"epoch": 1.72,
"grad_norm": 14.643707275390625,
"learning_rate": 5.666666666666667e-06,
"loss": 0.8134,
"step": 2450
},
{
"epoch": 1.74,
"grad_norm": 14.3607177734375,
"learning_rate": 5.611111111111112e-06,
"loss": 0.8725,
"step": 2475
},
{
"epoch": 1.75,
"grad_norm": 23.07048225402832,
"learning_rate": 5.555555555555557e-06,
"loss": 0.9089,
"step": 2500
},
{
"epoch": 1.77,
"grad_norm": 13.498936653137207,
"learning_rate": 5.500000000000001e-06,
"loss": 0.882,
"step": 2525
},
{
"epoch": 1.79,
"grad_norm": 16.4595947265625,
"learning_rate": 5.444444444444445e-06,
"loss": 0.8559,
"step": 2550
},
{
"epoch": 1.81,
"grad_norm": 14.928861618041992,
"learning_rate": 5.388888888888889e-06,
"loss": 0.7985,
"step": 2575
},
{
"epoch": 1.82,
"grad_norm": 12.721780776977539,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8446,
"step": 2600
},
{
"epoch": 1.84,
"grad_norm": 15.892865180969238,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.8533,
"step": 2625
},
{
"epoch": 1.86,
"grad_norm": 15.438478469848633,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.8228,
"step": 2650
},
{
"epoch": 1.88,
"grad_norm": 15.4628324508667,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.8099,
"step": 2675
},
{
"epoch": 1.89,
"grad_norm": 22.47260856628418,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.7983,
"step": 2700
},
{
"epoch": 1.91,
"grad_norm": 14.621177673339844,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.7938,
"step": 2725
},
{
"epoch": 1.93,
"grad_norm": 17.526037216186523,
"learning_rate": 5e-06,
"loss": 0.7927,
"step": 2750
},
{
"epoch": 1.95,
"grad_norm": 18.350086212158203,
"learning_rate": 4.944444444444445e-06,
"loss": 0.7404,
"step": 2775
},
{
"epoch": 1.96,
"grad_norm": 13.313730239868164,
"learning_rate": 4.888888888888889e-06,
"loss": 0.8781,
"step": 2800
},
{
"epoch": 1.98,
"grad_norm": 13.593079566955566,
"learning_rate": 4.833333333333333e-06,
"loss": 0.7972,
"step": 2825
},
{
"epoch": 2.0,
"grad_norm": 21.296812057495117,
"learning_rate": 4.777777777777778e-06,
"loss": 0.7721,
"step": 2850
},
{
"epoch": 2.02,
"grad_norm": 15.535265922546387,
"learning_rate": 4.722222222222222e-06,
"loss": 0.6832,
"step": 2875
},
{
"epoch": 2.04,
"grad_norm": 14.036383628845215,
"learning_rate": 4.666666666666667e-06,
"loss": 0.6262,
"step": 2900
},
{
"epoch": 2.05,
"grad_norm": 12.284743309020996,
"learning_rate": 4.611111111111112e-06,
"loss": 0.5659,
"step": 2925
},
{
"epoch": 2.07,
"grad_norm": 13.954039573669434,
"learning_rate": 4.555555555555556e-06,
"loss": 0.5496,
"step": 2950
},
{
"epoch": 2.09,
"grad_norm": 16.266000747680664,
"learning_rate": 4.5e-06,
"loss": 0.5937,
"step": 2975
},
{
"epoch": 2.11,
"grad_norm": 17.434391021728516,
"learning_rate": 4.444444444444444e-06,
"loss": 0.5846,
"step": 3000
},
{
"epoch": 2.11,
"eval_cer": 47.78639547731686,
"eval_loss": 1.1020084619522095,
"eval_runtime": 697.2267,
"eval_samples_per_second": 4.088,
"eval_steps_per_second": 0.512,
"eval_wer": 72.4027825293648,
"step": 3000
},
{
"epoch": 2.12,
"grad_norm": 17.443408966064453,
"learning_rate": 4.388888888888889e-06,
"loss": 0.5241,
"step": 3025
},
{
"epoch": 2.14,
"grad_norm": 10.447083473205566,
"learning_rate": 4.333333333333334e-06,
"loss": 0.5926,
"step": 3050
},
{
"epoch": 2.16,
"grad_norm": 19.269094467163086,
"learning_rate": 4.277777777777778e-06,
"loss": 0.5737,
"step": 3075
},
{
"epoch": 2.18,
"grad_norm": 16.834049224853516,
"learning_rate": 4.222222222222223e-06,
"loss": 0.5023,
"step": 3100
},
{
"epoch": 2.19,
"grad_norm": 18.942148208618164,
"learning_rate": 4.166666666666667e-06,
"loss": 0.6057,
"step": 3125
},
{
"epoch": 2.21,
"grad_norm": 13.578940391540527,
"learning_rate": 4.111111111111111e-06,
"loss": 0.6517,
"step": 3150
},
{
"epoch": 2.23,
"grad_norm": 13.73069953918457,
"learning_rate": 4.055555555555556e-06,
"loss": 0.6041,
"step": 3175
},
{
"epoch": 2.25,
"grad_norm": 14.027520179748535,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5526,
"step": 3200
},
{
"epoch": 2.26,
"grad_norm": 8.801058769226074,
"learning_rate": 3.944444444444445e-06,
"loss": 0.5499,
"step": 3225
},
{
"epoch": 2.28,
"grad_norm": 15.136823654174805,
"learning_rate": 3.88888888888889e-06,
"loss": 0.639,
"step": 3250
},
{
"epoch": 2.3,
"grad_norm": 12.88554859161377,
"learning_rate": 3.833333333333334e-06,
"loss": 0.5577,
"step": 3275
},
{
"epoch": 2.32,
"grad_norm": 14.318121910095215,
"learning_rate": 3.777777777777778e-06,
"loss": 0.6627,
"step": 3300
},
{
"epoch": 2.33,
"grad_norm": 13.512727737426758,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.6483,
"step": 3325
},
{
"epoch": 2.35,
"grad_norm": 17.256370544433594,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.5252,
"step": 3350
},
{
"epoch": 2.37,
"grad_norm": 15.82744026184082,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.5955,
"step": 3375
},
{
"epoch": 2.39,
"grad_norm": 15.740201950073242,
"learning_rate": 3.555555555555556e-06,
"loss": 0.594,
"step": 3400
},
{
"epoch": 2.4,
"grad_norm": 11.461688041687012,
"learning_rate": 3.5e-06,
"loss": 0.5577,
"step": 3425
},
{
"epoch": 2.42,
"grad_norm": 11.742351531982422,
"learning_rate": 3.444444444444445e-06,
"loss": 0.5899,
"step": 3450
},
{
"epoch": 2.44,
"grad_norm": 12.598740577697754,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.6051,
"step": 3475
},
{
"epoch": 2.46,
"grad_norm": 13.464423179626465,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5789,
"step": 3500
},
{
"epoch": 2.47,
"grad_norm": 18.55891990661621,
"learning_rate": 3.277777777777778e-06,
"loss": 0.627,
"step": 3525
},
{
"epoch": 2.49,
"grad_norm": 15.414612770080566,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.5117,
"step": 3550
},
{
"epoch": 2.51,
"grad_norm": 12.111812591552734,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.5448,
"step": 3575
},
{
"epoch": 2.53,
"grad_norm": 13.01075267791748,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.5588,
"step": 3600
},
{
"epoch": 2.54,
"grad_norm": 10.141874313354492,
"learning_rate": 3.055555555555556e-06,
"loss": 0.6379,
"step": 3625
},
{
"epoch": 2.56,
"grad_norm": 16.219833374023438,
"learning_rate": 3e-06,
"loss": 0.5916,
"step": 3650
},
{
"epoch": 2.58,
"grad_norm": 15.017302513122559,
"learning_rate": 2.944444444444445e-06,
"loss": 0.5663,
"step": 3675
},
{
"epoch": 2.6,
"grad_norm": 13.934078216552734,
"learning_rate": 2.888888888888889e-06,
"loss": 0.5667,
"step": 3700
},
{
"epoch": 2.61,
"grad_norm": 21.70482635498047,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.5694,
"step": 3725
},
{
"epoch": 2.63,
"grad_norm": 13.521171569824219,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.5453,
"step": 3750
},
{
"epoch": 2.65,
"grad_norm": 14.25871753692627,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.568,
"step": 3775
},
{
"epoch": 2.67,
"grad_norm": 15.246468544006348,
"learning_rate": 2.666666666666667e-06,
"loss": 0.6255,
"step": 3800
},
{
"epoch": 2.68,
"grad_norm": 12.953627586364746,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.597,
"step": 3825
},
{
"epoch": 2.7,
"grad_norm": 13.217804908752441,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.5954,
"step": 3850
},
{
"epoch": 2.72,
"grad_norm": 9.510717391967773,
"learning_rate": 2.5e-06,
"loss": 0.5497,
"step": 3875
},
{
"epoch": 2.74,
"grad_norm": 14.46406078338623,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.6277,
"step": 3900
},
{
"epoch": 2.75,
"grad_norm": 9.987096786499023,
"learning_rate": 2.388888888888889e-06,
"loss": 0.6343,
"step": 3925
},
{
"epoch": 2.77,
"grad_norm": 20.157745361328125,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.5747,
"step": 3950
},
{
"epoch": 2.79,
"grad_norm": 13.586224555969238,
"learning_rate": 2.277777777777778e-06,
"loss": 0.5702,
"step": 3975
},
{
"epoch": 2.81,
"grad_norm": 12.312579154968262,
"learning_rate": 2.222222222222222e-06,
"loss": 0.5732,
"step": 4000
},
{
"epoch": 2.81,
"eval_cer": 44.95220931355196,
"eval_loss": 1.0969101190567017,
"eval_runtime": 680.5084,
"eval_samples_per_second": 4.188,
"eval_steps_per_second": 0.525,
"eval_wer": 71.37643973087012,
"step": 4000
},
{
"epoch": 2.82,
"grad_norm": 13.381065368652344,
"learning_rate": 2.166666666666667e-06,
"loss": 0.6482,
"step": 4025
},
{
"epoch": 2.84,
"grad_norm": 14.35582447052002,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.562,
"step": 4050
},
{
"epoch": 2.86,
"grad_norm": 13.286849975585938,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.5772,
"step": 4075
},
{
"epoch": 2.88,
"grad_norm": 15.850845336914062,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.5406,
"step": 4100
},
{
"epoch": 2.89,
"grad_norm": 14.659579277038574,
"learning_rate": 1.944444444444445e-06,
"loss": 0.5307,
"step": 4125
},
{
"epoch": 2.91,
"grad_norm": 14.19031810760498,
"learning_rate": 1.888888888888889e-06,
"loss": 0.5941,
"step": 4150
},
{
"epoch": 2.93,
"grad_norm": 15.247186660766602,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.5901,
"step": 4175
},
{
"epoch": 2.95,
"grad_norm": 20.510732650756836,
"learning_rate": 1.777777777777778e-06,
"loss": 0.5564,
"step": 4200
},
{
"epoch": 2.96,
"grad_norm": 17.80103874206543,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.595,
"step": 4225
},
{
"epoch": 2.98,
"grad_norm": 17.83429718017578,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.6067,
"step": 4250
},
{
"epoch": 3.0,
"grad_norm": 25.276424407958984,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.5515,
"step": 4275
},
{
"epoch": 3.02,
"grad_norm": 10.023052215576172,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.4601,
"step": 4300
},
{
"epoch": 3.04,
"grad_norm": 13.198165893554688,
"learning_rate": 1.5e-06,
"loss": 0.4379,
"step": 4325
},
{
"epoch": 3.05,
"grad_norm": 10.112595558166504,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.4504,
"step": 4350
},
{
"epoch": 3.07,
"grad_norm": 11.835315704345703,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.4759,
"step": 4375
},
{
"epoch": 3.09,
"grad_norm": 9.861777305603027,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.3991,
"step": 4400
},
{
"epoch": 3.11,
"grad_norm": 10.269206047058105,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.4102,
"step": 4425
},
{
"epoch": 3.12,
"grad_norm": 10.480530738830566,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.4027,
"step": 4450
},
{
"epoch": 3.14,
"grad_norm": 8.999235153198242,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.4232,
"step": 4475
},
{
"epoch": 3.16,
"grad_norm": 11.057938575744629,
"learning_rate": 1.111111111111111e-06,
"loss": 0.3943,
"step": 4500
},
{
"epoch": 3.18,
"grad_norm": 11.833608627319336,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.3893,
"step": 4525
},
{
"epoch": 3.19,
"grad_norm": 12.620944023132324,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.435,
"step": 4550
},
{
"epoch": 3.21,
"grad_norm": 11.93198299407959,
"learning_rate": 9.444444444444445e-07,
"loss": 0.4328,
"step": 4575
},
{
"epoch": 3.23,
"grad_norm": 8.604935646057129,
"learning_rate": 8.88888888888889e-07,
"loss": 0.427,
"step": 4600
},
{
"epoch": 3.25,
"grad_norm": 11.086129188537598,
"learning_rate": 8.333333333333333e-07,
"loss": 0.4351,
"step": 4625
},
{
"epoch": 3.26,
"grad_norm": 17.6994571685791,
"learning_rate": 7.777777777777779e-07,
"loss": 0.4108,
"step": 4650
},
{
"epoch": 3.28,
"grad_norm": 9.501194953918457,
"learning_rate": 7.222222222222222e-07,
"loss": 0.4369,
"step": 4675
},
{
"epoch": 3.3,
"grad_norm": 12.139167785644531,
"learning_rate": 6.666666666666667e-07,
"loss": 0.4041,
"step": 4700
},
{
"epoch": 3.32,
"grad_norm": 8.777289390563965,
"learning_rate": 6.111111111111112e-07,
"loss": 0.4543,
"step": 4725
},
{
"epoch": 3.33,
"grad_norm": 8.665961265563965,
"learning_rate": 5.555555555555555e-07,
"loss": 0.4471,
"step": 4750
},
{
"epoch": 3.35,
"grad_norm": 15.121393203735352,
"learning_rate": 5.000000000000001e-07,
"loss": 0.5016,
"step": 4775
},
{
"epoch": 3.37,
"grad_norm": 14.372379302978516,
"learning_rate": 4.444444444444445e-07,
"loss": 0.4648,
"step": 4800
},
{
"epoch": 3.39,
"grad_norm": 9.729578018188477,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.4787,
"step": 4825
},
{
"epoch": 3.4,
"grad_norm": 8.461750030517578,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.3607,
"step": 4850
},
{
"epoch": 3.42,
"grad_norm": 13.118003845214844,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.4459,
"step": 4875
},
{
"epoch": 3.44,
"grad_norm": 11.437525749206543,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.4861,
"step": 4900
},
{
"epoch": 3.46,
"grad_norm": 13.136241912841797,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.482,
"step": 4925
},
{
"epoch": 3.47,
"grad_norm": 12.513566017150879,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.4167,
"step": 4950
},
{
"epoch": 3.49,
"grad_norm": 10.880446434020996,
"learning_rate": 5.555555555555556e-08,
"loss": 0.3889,
"step": 4975
},
{
"epoch": 3.51,
"grad_norm": 10.85265827178955,
"learning_rate": 0.0,
"loss": 0.3676,
"step": 5000
},
{
"epoch": 3.51,
"eval_cer": 46.004569883821965,
"eval_loss": 1.1264092922210693,
"eval_runtime": 688.5775,
"eval_samples_per_second": 4.139,
"eval_steps_per_second": 0.518,
"eval_wer": 72.96727106853689,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1000,
"total_flos": 1.154081874419712e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}