{
"best_metric": 74.92953873650924,
"best_model_checkpoint": "./whisper-small-finetune_iraqi/checkpoint-3000",
"epoch": 3.977724741447892,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 31.307842254638672,
"learning_rate": 5.000000000000001e-07,
"loss": 1.8461,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 24.101598739624023,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.8684,
"step": 50
},
{
"epoch": 0.06,
"grad_norm": 32.56498336791992,
"learning_rate": 1.5e-06,
"loss": 1.6799,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 26.423107147216797,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.8388,
"step": 100
},
{
"epoch": 0.1,
"grad_norm": 34.08100128173828,
"learning_rate": 2.5e-06,
"loss": 1.5596,
"step": 125
},
{
"epoch": 0.12,
"grad_norm": 29.480880737304688,
"learning_rate": 3e-06,
"loss": 1.741,
"step": 150
},
{
"epoch": 0.14,
"grad_norm": 20.32571792602539,
"learning_rate": 3.5e-06,
"loss": 1.5891,
"step": 175
},
{
"epoch": 0.16,
"grad_norm": 22.268383026123047,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5566,
"step": 200
},
{
"epoch": 0.18,
"grad_norm": 25.940326690673828,
"learning_rate": 4.5e-06,
"loss": 1.5812,
"step": 225
},
{
"epoch": 0.2,
"grad_norm": 20.023916244506836,
"learning_rate": 5e-06,
"loss": 1.4958,
"step": 250
},
{
"epoch": 0.22,
"grad_norm": 24.432737350463867,
"learning_rate": 5.500000000000001e-06,
"loss": 1.5452,
"step": 275
},
{
"epoch": 0.24,
"grad_norm": 24.809738159179688,
"learning_rate": 6e-06,
"loss": 1.4721,
"step": 300
},
{
"epoch": 0.26,
"grad_norm": 26.316701889038086,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.4539,
"step": 325
},
{
"epoch": 0.28,
"grad_norm": 20.42281723022461,
"learning_rate": 7e-06,
"loss": 1.4232,
"step": 350
},
{
"epoch": 0.3,
"grad_norm": 23.9652156829834,
"learning_rate": 7.500000000000001e-06,
"loss": 1.3441,
"step": 375
},
{
"epoch": 0.32,
"grad_norm": 22.536012649536133,
"learning_rate": 8.000000000000001e-06,
"loss": 1.419,
"step": 400
},
{
"epoch": 0.34,
"grad_norm": 24.226743698120117,
"learning_rate": 8.5e-06,
"loss": 1.5099,
"step": 425
},
{
"epoch": 0.36,
"grad_norm": 23.327442169189453,
"learning_rate": 9e-06,
"loss": 1.451,
"step": 450
},
{
"epoch": 0.38,
"grad_norm": 21.532514572143555,
"learning_rate": 9.5e-06,
"loss": 1.3318,
"step": 475
},
{
"epoch": 0.4,
"grad_norm": 22.450647354125977,
"learning_rate": 1e-05,
"loss": 1.3831,
"step": 500
},
{
"epoch": 0.42,
"grad_norm": 23.443180084228516,
"learning_rate": 9.944444444444445e-06,
"loss": 1.3584,
"step": 525
},
{
"epoch": 0.44,
"grad_norm": 15.108329772949219,
"learning_rate": 9.88888888888889e-06,
"loss": 1.3257,
"step": 550
},
{
"epoch": 0.46,
"grad_norm": 21.810331344604492,
"learning_rate": 9.833333333333333e-06,
"loss": 1.3358,
"step": 575
},
{
"epoch": 0.48,
"grad_norm": 20.90282440185547,
"learning_rate": 9.777777777777779e-06,
"loss": 1.3223,
"step": 600
},
{
"epoch": 0.5,
"grad_norm": 24.409061431884766,
"learning_rate": 9.722222222222223e-06,
"loss": 1.3732,
"step": 625
},
{
"epoch": 0.52,
"grad_norm": 25.396240234375,
"learning_rate": 9.666666666666667e-06,
"loss": 1.4752,
"step": 650
},
{
"epoch": 0.54,
"grad_norm": 22.128602981567383,
"learning_rate": 9.611111111111112e-06,
"loss": 1.316,
"step": 675
},
{
"epoch": 0.56,
"grad_norm": 23.54676055908203,
"learning_rate": 9.555555555555556e-06,
"loss": 1.3471,
"step": 700
},
{
"epoch": 0.58,
"grad_norm": 19.39595603942871,
"learning_rate": 9.5e-06,
"loss": 1.3318,
"step": 725
},
{
"epoch": 0.6,
"grad_norm": 16.275251388549805,
"learning_rate": 9.444444444444445e-06,
"loss": 1.179,
"step": 750
},
{
"epoch": 0.62,
"grad_norm": 20.7484073638916,
"learning_rate": 9.38888888888889e-06,
"loss": 1.2591,
"step": 775
},
{
"epoch": 0.64,
"grad_norm": 25.535470962524414,
"learning_rate": 9.333333333333334e-06,
"loss": 1.2721,
"step": 800
},
{
"epoch": 0.66,
"grad_norm": 21.40460777282715,
"learning_rate": 9.277777777777778e-06,
"loss": 1.2564,
"step": 825
},
{
"epoch": 0.68,
"grad_norm": 23.19025993347168,
"learning_rate": 9.222222222222224e-06,
"loss": 1.2542,
"step": 850
},
{
"epoch": 0.7,
"grad_norm": 18.77362632751465,
"learning_rate": 9.166666666666666e-06,
"loss": 1.1861,
"step": 875
},
{
"epoch": 0.72,
"grad_norm": 27.533153533935547,
"learning_rate": 9.111111111111112e-06,
"loss": 1.3505,
"step": 900
},
{
"epoch": 0.74,
"grad_norm": 21.657012939453125,
"learning_rate": 9.055555555555556e-06,
"loss": 1.1985,
"step": 925
},
{
"epoch": 0.76,
"grad_norm": 27.935184478759766,
"learning_rate": 9e-06,
"loss": 1.2384,
"step": 950
},
{
"epoch": 0.78,
"grad_norm": 22.997709274291992,
"learning_rate": 8.944444444444446e-06,
"loss": 1.2963,
"step": 975
},
{
"epoch": 0.8,
"grad_norm": 22.01042938232422,
"learning_rate": 8.888888888888888e-06,
"loss": 1.2175,
"step": 1000
},
{
"epoch": 0.8,
"eval_cer": 53.03030303030303,
"eval_loss": 1.2452795505523682,
"eval_runtime": 621.4587,
"eval_samples_per_second": 4.044,
"eval_steps_per_second": 0.507,
"eval_wer": 81.205746889393,
"step": 1000
},
{
"epoch": 0.82,
"grad_norm": 22.637540817260742,
"learning_rate": 8.833333333333334e-06,
"loss": 1.3225,
"step": 1025
},
{
"epoch": 0.84,
"grad_norm": 22.166149139404297,
"learning_rate": 8.777777777777778e-06,
"loss": 1.2662,
"step": 1050
},
{
"epoch": 0.86,
"grad_norm": 21.192859649658203,
"learning_rate": 8.722222222222224e-06,
"loss": 1.2507,
"step": 1075
},
{
"epoch": 0.88,
"grad_norm": 25.296127319335938,
"learning_rate": 8.666666666666668e-06,
"loss": 1.3007,
"step": 1100
},
{
"epoch": 0.89,
"grad_norm": 19.297321319580078,
"learning_rate": 8.611111111111112e-06,
"loss": 1.2212,
"step": 1125
},
{
"epoch": 0.91,
"grad_norm": 24.768157958984375,
"learning_rate": 8.555555555555556e-06,
"loss": 1.2646,
"step": 1150
},
{
"epoch": 0.93,
"grad_norm": 13.836922645568848,
"learning_rate": 8.5e-06,
"loss": 1.1391,
"step": 1175
},
{
"epoch": 0.95,
"grad_norm": 18.015613555908203,
"learning_rate": 8.444444444444446e-06,
"loss": 1.3267,
"step": 1200
},
{
"epoch": 0.97,
"grad_norm": 18.187437057495117,
"learning_rate": 8.38888888888889e-06,
"loss": 1.1999,
"step": 1225
},
{
"epoch": 0.99,
"grad_norm": 20.086061477661133,
"learning_rate": 8.333333333333334e-06,
"loss": 1.2112,
"step": 1250
},
{
"epoch": 1.01,
"grad_norm": 14.848949432373047,
"learning_rate": 8.277777777777778e-06,
"loss": 1.0858,
"step": 1275
},
{
"epoch": 1.03,
"grad_norm": 18.931108474731445,
"learning_rate": 8.222222222222222e-06,
"loss": 0.904,
"step": 1300
},
{
"epoch": 1.05,
"grad_norm": 17.61419105529785,
"learning_rate": 8.166666666666668e-06,
"loss": 0.9141,
"step": 1325
},
{
"epoch": 1.07,
"grad_norm": 14.918651580810547,
"learning_rate": 8.111111111111112e-06,
"loss": 0.8404,
"step": 1350
},
{
"epoch": 1.09,
"grad_norm": 13.606977462768555,
"learning_rate": 8.055555555555557e-06,
"loss": 0.9239,
"step": 1375
},
{
"epoch": 1.11,
"grad_norm": 10.916210174560547,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9142,
"step": 1400
},
{
"epoch": 1.13,
"grad_norm": 15.314620971679688,
"learning_rate": 7.944444444444445e-06,
"loss": 0.8994,
"step": 1425
},
{
"epoch": 1.15,
"grad_norm": 17.887939453125,
"learning_rate": 7.88888888888889e-06,
"loss": 0.9692,
"step": 1450
},
{
"epoch": 1.17,
"grad_norm": 18.108110427856445,
"learning_rate": 7.833333333333333e-06,
"loss": 1.0226,
"step": 1475
},
{
"epoch": 1.19,
"grad_norm": 19.22450828552246,
"learning_rate": 7.77777777777778e-06,
"loss": 0.9862,
"step": 1500
},
{
"epoch": 1.21,
"grad_norm": 16.892333984375,
"learning_rate": 7.722222222222223e-06,
"loss": 0.8676,
"step": 1525
},
{
"epoch": 1.23,
"grad_norm": 19.769046783447266,
"learning_rate": 7.666666666666667e-06,
"loss": 1.009,
"step": 1550
},
{
"epoch": 1.25,
"grad_norm": 17.185529708862305,
"learning_rate": 7.611111111111111e-06,
"loss": 0.9733,
"step": 1575
},
{
"epoch": 1.27,
"grad_norm": 16.545021057128906,
"learning_rate": 7.555555555555556e-06,
"loss": 0.8958,
"step": 1600
},
{
"epoch": 1.29,
"grad_norm": 12.434943199157715,
"learning_rate": 7.500000000000001e-06,
"loss": 0.8863,
"step": 1625
},
{
"epoch": 1.31,
"grad_norm": 17.79275894165039,
"learning_rate": 7.444444444444445e-06,
"loss": 0.9409,
"step": 1650
},
{
"epoch": 1.33,
"grad_norm": 18.283716201782227,
"learning_rate": 7.38888888888889e-06,
"loss": 0.9022,
"step": 1675
},
{
"epoch": 1.35,
"grad_norm": 12.344863891601562,
"learning_rate": 7.333333333333333e-06,
"loss": 0.9641,
"step": 1700
},
{
"epoch": 1.37,
"grad_norm": 19.406063079833984,
"learning_rate": 7.277777777777778e-06,
"loss": 0.9584,
"step": 1725
},
{
"epoch": 1.39,
"grad_norm": 14.66451644897461,
"learning_rate": 7.222222222222223e-06,
"loss": 0.9161,
"step": 1750
},
{
"epoch": 1.41,
"grad_norm": 22.2624454498291,
"learning_rate": 7.166666666666667e-06,
"loss": 0.8841,
"step": 1775
},
{
"epoch": 1.43,
"grad_norm": 21.720008850097656,
"learning_rate": 7.111111111111112e-06,
"loss": 0.8905,
"step": 1800
},
{
"epoch": 1.45,
"grad_norm": 18.579879760742188,
"learning_rate": 7.055555555555557e-06,
"loss": 0.9017,
"step": 1825
},
{
"epoch": 1.47,
"grad_norm": 13.965752601623535,
"learning_rate": 7e-06,
"loss": 0.9258,
"step": 1850
},
{
"epoch": 1.49,
"grad_norm": 12.050531387329102,
"learning_rate": 6.944444444444445e-06,
"loss": 0.8406,
"step": 1875
},
{
"epoch": 1.51,
"grad_norm": 20.574005126953125,
"learning_rate": 6.88888888888889e-06,
"loss": 0.9911,
"step": 1900
},
{
"epoch": 1.53,
"grad_norm": 13.421600341796875,
"learning_rate": 6.833333333333334e-06,
"loss": 0.9802,
"step": 1925
},
{
"epoch": 1.55,
"grad_norm": 15.10280704498291,
"learning_rate": 6.777777777777779e-06,
"loss": 0.9281,
"step": 1950
},
{
"epoch": 1.57,
"grad_norm": 16.386531829833984,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.9103,
"step": 1975
},
{
"epoch": 1.59,
"grad_norm": 21.13428497314453,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8821,
"step": 2000
},
{
"epoch": 1.59,
"eval_cer": 58.570669096984886,
"eval_loss": 1.163949728012085,
"eval_runtime": 688.3941,
"eval_samples_per_second": 3.651,
"eval_steps_per_second": 0.458,
"eval_wer": 80.6833023991201,
"step": 2000
},
{
"epoch": 1.61,
"grad_norm": 12.398762702941895,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.9263,
"step": 2025
},
{
"epoch": 1.63,
"grad_norm": 19.7735538482666,
"learning_rate": 6.555555555555556e-06,
"loss": 0.9111,
"step": 2050
},
{
"epoch": 1.65,
"grad_norm": 17.024389266967773,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.8293,
"step": 2075
},
{
"epoch": 1.67,
"grad_norm": 25.842208862304688,
"learning_rate": 6.444444444444445e-06,
"loss": 0.9776,
"step": 2100
},
{
"epoch": 1.69,
"grad_norm": 13.255263328552246,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.8695,
"step": 2125
},
{
"epoch": 1.71,
"grad_norm": 23.875268936157227,
"learning_rate": 6.333333333333333e-06,
"loss": 0.965,
"step": 2150
},
{
"epoch": 1.73,
"grad_norm": 12.608431816101074,
"learning_rate": 6.277777777777778e-06,
"loss": 0.9176,
"step": 2175
},
{
"epoch": 1.75,
"grad_norm": 18.595909118652344,
"learning_rate": 6.222222222222223e-06,
"loss": 0.9389,
"step": 2200
},
{
"epoch": 1.77,
"grad_norm": 17.5798397064209,
"learning_rate": 6.166666666666667e-06,
"loss": 0.8804,
"step": 2225
},
{
"epoch": 1.79,
"grad_norm": 20.442508697509766,
"learning_rate": 6.111111111111112e-06,
"loss": 0.9779,
"step": 2250
},
{
"epoch": 1.81,
"grad_norm": 17.262054443359375,
"learning_rate": 6.055555555555555e-06,
"loss": 0.9289,
"step": 2275
},
{
"epoch": 1.83,
"grad_norm": 11.93671989440918,
"learning_rate": 6e-06,
"loss": 0.8674,
"step": 2300
},
{
"epoch": 1.85,
"grad_norm": 19.9697322845459,
"learning_rate": 5.944444444444445e-06,
"loss": 0.8906,
"step": 2325
},
{
"epoch": 1.87,
"grad_norm": 19.09278678894043,
"learning_rate": 5.88888888888889e-06,
"loss": 0.949,
"step": 2350
},
{
"epoch": 1.89,
"grad_norm": 13.083151817321777,
"learning_rate": 5.833333333333334e-06,
"loss": 0.8925,
"step": 2375
},
{
"epoch": 1.91,
"grad_norm": 16.96628761291504,
"learning_rate": 5.777777777777778e-06,
"loss": 0.8806,
"step": 2400
},
{
"epoch": 1.93,
"grad_norm": 17.384708404541016,
"learning_rate": 5.722222222222222e-06,
"loss": 0.909,
"step": 2425
},
{
"epoch": 1.95,
"grad_norm": 16.435260772705078,
"learning_rate": 5.666666666666667e-06,
"loss": 0.9044,
"step": 2450
},
{
"epoch": 1.97,
"grad_norm": 19.849964141845703,
"learning_rate": 5.611111111111112e-06,
"loss": 0.8914,
"step": 2475
},
{
"epoch": 1.99,
"grad_norm": 18.659517288208008,
"learning_rate": 5.555555555555557e-06,
"loss": 0.8724,
"step": 2500
},
{
"epoch": 2.01,
"grad_norm": 11.454329490661621,
"learning_rate": 5.500000000000001e-06,
"loss": 0.6817,
"step": 2525
},
{
"epoch": 2.03,
"grad_norm": 15.80736255645752,
"learning_rate": 5.444444444444445e-06,
"loss": 0.5733,
"step": 2550
},
{
"epoch": 2.05,
"grad_norm": 22.253652572631836,
"learning_rate": 5.388888888888889e-06,
"loss": 0.6498,
"step": 2575
},
{
"epoch": 2.07,
"grad_norm": 16.574365615844727,
"learning_rate": 5.333333333333334e-06,
"loss": 0.586,
"step": 2600
},
{
"epoch": 2.09,
"grad_norm": 15.736115455627441,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.5926,
"step": 2625
},
{
"epoch": 2.11,
"grad_norm": 14.3034086227417,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.6365,
"step": 2650
},
{
"epoch": 2.13,
"grad_norm": 14.139527320861816,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.6302,
"step": 2675
},
{
"epoch": 2.15,
"grad_norm": 11.31550121307373,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.5432,
"step": 2700
},
{
"epoch": 2.17,
"grad_norm": 20.395217895507812,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.6269,
"step": 2725
},
{
"epoch": 2.19,
"grad_norm": 17.261280059814453,
"learning_rate": 5e-06,
"loss": 0.6443,
"step": 2750
},
{
"epoch": 2.21,
"grad_norm": 12.30817699432373,
"learning_rate": 4.944444444444445e-06,
"loss": 0.5756,
"step": 2775
},
{
"epoch": 2.23,
"grad_norm": 9.878785133361816,
"learning_rate": 4.888888888888889e-06,
"loss": 0.639,
"step": 2800
},
{
"epoch": 2.25,
"grad_norm": 14.701591491699219,
"learning_rate": 4.833333333333333e-06,
"loss": 0.6066,
"step": 2825
},
{
"epoch": 2.27,
"grad_norm": 16.308835983276367,
"learning_rate": 4.777777777777778e-06,
"loss": 0.6264,
"step": 2850
},
{
"epoch": 2.29,
"grad_norm": 19.56263542175293,
"learning_rate": 4.722222222222222e-06,
"loss": 0.6369,
"step": 2875
},
{
"epoch": 2.31,
"grad_norm": 6.289492607116699,
"learning_rate": 4.666666666666667e-06,
"loss": 0.6256,
"step": 2900
},
{
"epoch": 2.33,
"grad_norm": 13.804625511169434,
"learning_rate": 4.611111111111112e-06,
"loss": 0.6128,
"step": 2925
},
{
"epoch": 2.35,
"grad_norm": 15.281023979187012,
"learning_rate": 4.555555555555556e-06,
"loss": 0.645,
"step": 2950
},
{
"epoch": 2.37,
"grad_norm": 14.549323081970215,
"learning_rate": 4.5e-06,
"loss": 0.7194,
"step": 2975
},
{
"epoch": 2.39,
"grad_norm": 15.436699867248535,
"learning_rate": 4.444444444444444e-06,
"loss": 0.6527,
"step": 3000
},
{
"epoch": 2.39,
"eval_cer": 48.39877471456418,
"eval_loss": 1.1647427082061768,
"eval_runtime": 640.6707,
"eval_samples_per_second": 3.922,
"eval_steps_per_second": 0.492,
"eval_wer": 74.92953873650924,
"step": 3000
},
{
"epoch": 2.41,
"grad_norm": 10.962376594543457,
"learning_rate": 4.388888888888889e-06,
"loss": 0.6551,
"step": 3025
},
{
"epoch": 2.43,
"grad_norm": 14.927529335021973,
"learning_rate": 4.333333333333334e-06,
"loss": 0.5794,
"step": 3050
},
{
"epoch": 2.45,
"grad_norm": 18.27710723876953,
"learning_rate": 4.277777777777778e-06,
"loss": 0.6381,
"step": 3075
},
{
"epoch": 2.47,
"grad_norm": 12.02866268157959,
"learning_rate": 4.222222222222223e-06,
"loss": 0.559,
"step": 3100
},
{
"epoch": 2.49,
"grad_norm": 18.277559280395508,
"learning_rate": 4.166666666666667e-06,
"loss": 0.6653,
"step": 3125
},
{
"epoch": 2.51,
"grad_norm": 17.41031265258789,
"learning_rate": 4.111111111111111e-06,
"loss": 0.648,
"step": 3150
},
{
"epoch": 2.53,
"grad_norm": 17.889631271362305,
"learning_rate": 4.055555555555556e-06,
"loss": 0.5711,
"step": 3175
},
{
"epoch": 2.55,
"grad_norm": 12.564661026000977,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6177,
"step": 3200
},
{
"epoch": 2.57,
"grad_norm": 12.346695899963379,
"learning_rate": 3.944444444444445e-06,
"loss": 0.607,
"step": 3225
},
{
"epoch": 2.59,
"grad_norm": 15.423745155334473,
"learning_rate": 3.88888888888889e-06,
"loss": 0.6286,
"step": 3250
},
{
"epoch": 2.61,
"grad_norm": 9.96914005279541,
"learning_rate": 3.833333333333334e-06,
"loss": 0.609,
"step": 3275
},
{
"epoch": 2.63,
"grad_norm": 17.189481735229492,
"learning_rate": 3.777777777777778e-06,
"loss": 0.5851,
"step": 3300
},
{
"epoch": 2.65,
"grad_norm": 12.618528366088867,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.6227,
"step": 3325
},
{
"epoch": 2.67,
"grad_norm": 14.24203109741211,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.637,
"step": 3350
},
{
"epoch": 2.68,
"grad_norm": 19.71699333190918,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.6088,
"step": 3375
},
{
"epoch": 2.7,
"grad_norm": 18.02073860168457,
"learning_rate": 3.555555555555556e-06,
"loss": 0.6252,
"step": 3400
},
{
"epoch": 2.72,
"grad_norm": 14.6773099899292,
"learning_rate": 3.5e-06,
"loss": 0.5767,
"step": 3425
},
{
"epoch": 2.74,
"grad_norm": 13.4647855758667,
"learning_rate": 3.444444444444445e-06,
"loss": 0.6186,
"step": 3450
},
{
"epoch": 2.76,
"grad_norm": 15.786669731140137,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.5699,
"step": 3475
},
{
"epoch": 2.78,
"grad_norm": 10.151808738708496,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.571,
"step": 3500
},
{
"epoch": 2.8,
"grad_norm": 11.594339370727539,
"learning_rate": 3.277777777777778e-06,
"loss": 0.6104,
"step": 3525
},
{
"epoch": 2.82,
"grad_norm": 19.481727600097656,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.584,
"step": 3550
},
{
"epoch": 2.84,
"grad_norm": 15.422442436218262,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.5463,
"step": 3575
},
{
"epoch": 2.86,
"grad_norm": 11.132479667663574,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.5999,
"step": 3600
},
{
"epoch": 2.88,
"grad_norm": 7.921601295471191,
"learning_rate": 3.055555555555556e-06,
"loss": 0.5775,
"step": 3625
},
{
"epoch": 2.9,
"grad_norm": 18.08513832092285,
"learning_rate": 3e-06,
"loss": 0.6061,
"step": 3650
},
{
"epoch": 2.92,
"grad_norm": 12.494170188903809,
"learning_rate": 2.944444444444445e-06,
"loss": 0.5436,
"step": 3675
},
{
"epoch": 2.94,
"grad_norm": 12.588512420654297,
"learning_rate": 2.888888888888889e-06,
"loss": 0.5468,
"step": 3700
},
{
"epoch": 2.96,
"grad_norm": 13.475690841674805,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.5645,
"step": 3725
},
{
"epoch": 2.98,
"grad_norm": 10.302828788757324,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.5733,
"step": 3750
},
{
"epoch": 3.0,
"grad_norm": 12.000411033630371,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.5959,
"step": 3775
},
{
"epoch": 3.02,
"grad_norm": 9.559981346130371,
"learning_rate": 2.666666666666667e-06,
"loss": 0.3823,
"step": 3800
},
{
"epoch": 3.04,
"grad_norm": 6.879577159881592,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.3876,
"step": 3825
},
{
"epoch": 3.06,
"grad_norm": 11.854860305786133,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.3963,
"step": 3850
},
{
"epoch": 3.08,
"grad_norm": 8.722013473510742,
"learning_rate": 2.5e-06,
"loss": 0.4091,
"step": 3875
},
{
"epoch": 3.1,
"grad_norm": 8.34683609008789,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.4084,
"step": 3900
},
{
"epoch": 3.12,
"grad_norm": 14.6788969039917,
"learning_rate": 2.388888888888889e-06,
"loss": 0.4166,
"step": 3925
},
{
"epoch": 3.14,
"grad_norm": 12.618093490600586,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.3751,
"step": 3950
},
{
"epoch": 3.16,
"grad_norm": 7.677009582519531,
"learning_rate": 2.277777777777778e-06,
"loss": 0.3864,
"step": 3975
},
{
"epoch": 3.18,
"grad_norm": 6.518270492553711,
"learning_rate": 2.222222222222222e-06,
"loss": 0.3078,
"step": 4000
},
{
"epoch": 3.18,
"eval_cer": 53.61383256120098,
"eval_loss": 1.188154935836792,
"eval_runtime": 654.7314,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.481,
"eval_wer": 85.45404550766482,
"step": 4000
},
{
"epoch": 3.2,
"grad_norm": 12.325676918029785,
"learning_rate": 2.166666666666667e-06,
"loss": 0.4185,
"step": 4025
},
{
"epoch": 3.22,
"grad_norm": 9.633131980895996,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.5039,
"step": 4050
},
{
"epoch": 3.24,
"grad_norm": 12.042168617248535,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.3689,
"step": 4075
},
{
"epoch": 3.26,
"grad_norm": 11.479193687438965,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.3856,
"step": 4100
},
{
"epoch": 3.28,
"grad_norm": 15.161530494689941,
"learning_rate": 1.944444444444445e-06,
"loss": 0.4055,
"step": 4125
},
{
"epoch": 3.3,
"grad_norm": 13.629692077636719,
"learning_rate": 1.888888888888889e-06,
"loss": 0.4257,
"step": 4150
},
{
"epoch": 3.32,
"grad_norm": 7.724813461303711,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.4206,
"step": 4175
},
{
"epoch": 3.34,
"grad_norm": 11.88393783569336,
"learning_rate": 1.777777777777778e-06,
"loss": 0.3937,
"step": 4200
},
{
"epoch": 3.36,
"grad_norm": 11.900699615478516,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.4389,
"step": 4225
},
{
"epoch": 3.38,
"grad_norm": 15.184126853942871,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.4195,
"step": 4250
},
{
"epoch": 3.4,
"grad_norm": 17.46352767944336,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.4278,
"step": 4275
},
{
"epoch": 3.42,
"grad_norm": 6.922684669494629,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.3707,
"step": 4300
},
{
"epoch": 3.44,
"grad_norm": 8.876372337341309,
"learning_rate": 1.5e-06,
"loss": 0.4042,
"step": 4325
},
{
"epoch": 3.46,
"grad_norm": 11.79426097869873,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.4306,
"step": 4350
},
{
"epoch": 3.48,
"grad_norm": 13.896903038024902,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.4667,
"step": 4375
},
{
"epoch": 3.5,
"grad_norm": 15.71197509765625,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.3795,
"step": 4400
},
{
"epoch": 3.52,
"grad_norm": 24.959102630615234,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.3962,
"step": 4425
},
{
"epoch": 3.54,
"grad_norm": 14.953639030456543,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.405,
"step": 4450
},
{
"epoch": 3.56,
"grad_norm": 9.408991813659668,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.3741,
"step": 4475
},
{
"epoch": 3.58,
"grad_norm": 13.057042121887207,
"learning_rate": 1.111111111111111e-06,
"loss": 0.443,
"step": 4500
},
{
"epoch": 3.6,
"grad_norm": 13.196622848510742,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.3873,
"step": 4525
},
{
"epoch": 3.62,
"grad_norm": 13.450845718383789,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.4196,
"step": 4550
},
{
"epoch": 3.64,
"grad_norm": 11.10022258758545,
"learning_rate": 9.444444444444445e-07,
"loss": 0.4027,
"step": 4575
},
{
"epoch": 3.66,
"grad_norm": 11.709542274475098,
"learning_rate": 8.88888888888889e-07,
"loss": 0.3875,
"step": 4600
},
{
"epoch": 3.68,
"grad_norm": 18.268943786621094,
"learning_rate": 8.333333333333333e-07,
"loss": 0.4404,
"step": 4625
},
{
"epoch": 3.7,
"grad_norm": 9.290139198303223,
"learning_rate": 7.777777777777779e-07,
"loss": 0.3566,
"step": 4650
},
{
"epoch": 3.72,
"grad_norm": 9.714057922363281,
"learning_rate": 7.222222222222222e-07,
"loss": 0.3539,
"step": 4675
},
{
"epoch": 3.74,
"grad_norm": 12.28147029876709,
"learning_rate": 6.666666666666667e-07,
"loss": 0.4019,
"step": 4700
},
{
"epoch": 3.76,
"grad_norm": 14.317485809326172,
"learning_rate": 6.111111111111112e-07,
"loss": 0.4342,
"step": 4725
},
{
"epoch": 3.78,
"grad_norm": 14.031659126281738,
"learning_rate": 5.555555555555555e-07,
"loss": 0.3736,
"step": 4750
},
{
"epoch": 3.8,
"grad_norm": 11.357044219970703,
"learning_rate": 5.000000000000001e-07,
"loss": 0.4058,
"step": 4775
},
{
"epoch": 3.82,
"grad_norm": 15.167471885681152,
"learning_rate": 4.444444444444445e-07,
"loss": 0.3479,
"step": 4800
},
{
"epoch": 3.84,
"grad_norm": 10.228837966918945,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.3747,
"step": 4825
},
{
"epoch": 3.86,
"grad_norm": 15.795928955078125,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.4141,
"step": 4850
},
{
"epoch": 3.88,
"grad_norm": 14.220040321350098,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.3817,
"step": 4875
},
{
"epoch": 3.9,
"grad_norm": 15.395153999328613,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.3983,
"step": 4900
},
{
"epoch": 3.92,
"grad_norm": 16.81828498840332,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.4367,
"step": 4925
},
{
"epoch": 3.94,
"grad_norm": 12.835176467895508,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.3874,
"step": 4950
},
{
"epoch": 3.96,
"grad_norm": 16.80717658996582,
"learning_rate": 5.555555555555556e-08,
"loss": 0.3914,
"step": 4975
},
{
"epoch": 3.98,
"grad_norm": 17.128372192382812,
"learning_rate": 0.0,
"loss": 0.403,
"step": 5000
},
{
"epoch": 3.98,
"eval_cer": 51.96957039062302,
"eval_loss": 1.188420295715332,
"eval_runtime": 653.7956,
"eval_samples_per_second": 3.844,
"eval_steps_per_second": 0.482,
"eval_wer": 79.83776723723103,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1000,
"total_flos": 1.15390872317952e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}