{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.3342670401493932,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 42.80295181274414,
"learning_rate": 5e-09,
"loss": 1.0881,
"step": 25
},
{
"epoch": 0.02,
"grad_norm": 36.09112548828125,
"learning_rate": 1e-08,
"loss": 1.0284,
"step": 50
},
{
"epoch": 0.04,
"grad_norm": 42.47553634643555,
"learning_rate": 1.5e-08,
"loss": 1.0556,
"step": 75
},
{
"epoch": 0.05,
"grad_norm": 40.723106384277344,
"learning_rate": 2e-08,
"loss": 1.0326,
"step": 100
},
{
"epoch": 0.06,
"grad_norm": 39.514530181884766,
"learning_rate": 2.5e-08,
"loss": 1.0675,
"step": 125
},
{
"epoch": 0.07,
"grad_norm": 40.45370864868164,
"learning_rate": 3e-08,
"loss": 1.0513,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 38.05487823486328,
"learning_rate": 3.5e-08,
"loss": 1.0533,
"step": 175
},
{
"epoch": 0.09,
"grad_norm": 36.58195114135742,
"learning_rate": 4e-08,
"loss": 1.05,
"step": 200
},
{
"epoch": 0.11,
"grad_norm": 41.44173049926758,
"learning_rate": 4.5e-08,
"loss": 1.0471,
"step": 225
},
{
"epoch": 0.12,
"grad_norm": 37.410099029541016,
"learning_rate": 5e-08,
"loss": 1.0451,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 38.047340393066406,
"learning_rate": 5.4999999999999996e-08,
"loss": 1.0402,
"step": 275
},
{
"epoch": 0.14,
"grad_norm": 38.6348762512207,
"learning_rate": 6e-08,
"loss": 1.0654,
"step": 300
},
{
"epoch": 0.15,
"grad_norm": 38.60363006591797,
"learning_rate": 6.5e-08,
"loss": 1.0348,
"step": 325
},
{
"epoch": 0.16,
"grad_norm": 37.33879089355469,
"learning_rate": 7e-08,
"loss": 1.072,
"step": 350
},
{
"epoch": 0.18,
"grad_norm": 36.03329086303711,
"learning_rate": 7.5e-08,
"loss": 0.9851,
"step": 375
},
{
"epoch": 0.19,
"grad_norm": 34.04118347167969,
"learning_rate": 8e-08,
"loss": 1.0417,
"step": 400
},
{
"epoch": 0.2,
"grad_norm": 34.55414581298828,
"learning_rate": 8.500000000000001e-08,
"loss": 0.9978,
"step": 425
},
{
"epoch": 0.21,
"grad_norm": 35.677894592285156,
"learning_rate": 9e-08,
"loss": 1.0084,
"step": 450
},
{
"epoch": 0.22,
"grad_norm": 36.7861213684082,
"learning_rate": 9.499999999999999e-08,
"loss": 0.9627,
"step": 475
},
{
"epoch": 0.23,
"grad_norm": 32.090797424316406,
"learning_rate": 1e-07,
"loss": 0.9709,
"step": 500
},
{
"epoch": 0.25,
"grad_norm": 35.564849853515625,
"learning_rate": 1.0499999999999999e-07,
"loss": 0.9682,
"step": 525
},
{
"epoch": 0.26,
"grad_norm": 27.45682716369629,
"learning_rate": 1.0999999999999999e-07,
"loss": 0.9262,
"step": 550
},
{
"epoch": 0.27,
"grad_norm": 32.76565933227539,
"learning_rate": 1.15e-07,
"loss": 0.8343,
"step": 575
},
{
"epoch": 0.28,
"grad_norm": 25.909934997558594,
"learning_rate": 1.2e-07,
"loss": 0.8247,
"step": 600
},
{
"epoch": 0.29,
"grad_norm": 28.087352752685547,
"learning_rate": 1.25e-07,
"loss": 0.8033,
"step": 625
},
{
"epoch": 0.3,
"grad_norm": 28.653064727783203,
"learning_rate": 1.3e-07,
"loss": 0.742,
"step": 650
},
{
"epoch": 0.32,
"grad_norm": 25.42856216430664,
"learning_rate": 1.35e-07,
"loss": 0.6943,
"step": 675
},
{
"epoch": 0.33,
"grad_norm": 33.012351989746094,
"learning_rate": 1.4e-07,
"loss": 0.6754,
"step": 700
},
{
"epoch": 0.34,
"grad_norm": 30.743101119995117,
"learning_rate": 1.45e-07,
"loss": 0.5455,
"step": 725
},
{
"epoch": 0.35,
"grad_norm": 26.959556579589844,
"learning_rate": 1.5e-07,
"loss": 0.5058,
"step": 750
},
{
"epoch": 0.36,
"grad_norm": 21.917985916137695,
"learning_rate": 1.55e-07,
"loss": 0.4522,
"step": 775
},
{
"epoch": 0.37,
"grad_norm": 25.912960052490234,
"learning_rate": 1.6e-07,
"loss": 0.4506,
"step": 800
},
{
"epoch": 0.39,
"grad_norm": 25.664531707763672,
"learning_rate": 1.65e-07,
"loss": 0.4513,
"step": 825
},
{
"epoch": 0.4,
"grad_norm": 22.275697708129883,
"learning_rate": 1.7000000000000001e-07,
"loss": 0.4161,
"step": 850
},
{
"epoch": 0.41,
"grad_norm": 24.786251068115234,
"learning_rate": 1.75e-07,
"loss": 0.4154,
"step": 875
},
{
"epoch": 0.42,
"grad_norm": 25.422107696533203,
"learning_rate": 1.8e-07,
"loss": 0.3859,
"step": 900
},
{
"epoch": 0.43,
"grad_norm": 17.307992935180664,
"learning_rate": 1.85e-07,
"loss": 0.3672,
"step": 925
},
{
"epoch": 0.44,
"grad_norm": 21.586824417114258,
"learning_rate": 1.8999999999999998e-07,
"loss": 0.3655,
"step": 950
},
{
"epoch": 0.46,
"grad_norm": 20.625280380249023,
"learning_rate": 1.9499999999999999e-07,
"loss": 0.3717,
"step": 975
},
{
"epoch": 0.47,
"grad_norm": 24.026769638061523,
"learning_rate": 2e-07,
"loss": 0.3915,
"step": 1000
},
{
"epoch": 0.47,
"eval_loss": 0.29781457781791687,
"eval_runtime": 1337.151,
"eval_samples_per_second": 7.04,
"eval_steps_per_second": 0.44,
"eval_wer": 0.18578591812714637,
"step": 1000
},
{
"epoch": 0.48,
"grad_norm": 19.427650451660156,
"learning_rate": 2.0499999999999997e-07,
"loss": 0.3242,
"step": 1025
},
{
"epoch": 0.49,
"grad_norm": 17.370065689086914,
"learning_rate": 2.0999999999999997e-07,
"loss": 0.3605,
"step": 1050
},
{
"epoch": 0.5,
"grad_norm": 17.335697174072266,
"learning_rate": 2.1499999999999998e-07,
"loss": 0.3689,
"step": 1075
},
{
"epoch": 0.51,
"grad_norm": 18.611391067504883,
"learning_rate": 2.1999999999999998e-07,
"loss": 0.4011,
"step": 1100
},
{
"epoch": 0.53,
"grad_norm": 19.87590217590332,
"learning_rate": 2.25e-07,
"loss": 0.3401,
"step": 1125
},
{
"epoch": 0.54,
"grad_norm": 14.708137512207031,
"learning_rate": 2.3e-07,
"loss": 0.353,
"step": 1150
},
{
"epoch": 0.55,
"grad_norm": 19.14278793334961,
"learning_rate": 2.3499999999999997e-07,
"loss": 0.3472,
"step": 1175
},
{
"epoch": 0.56,
"grad_norm": 15.516341209411621,
"learning_rate": 2.4e-07,
"loss": 0.3431,
"step": 1200
},
{
"epoch": 0.57,
"grad_norm": 20.186065673828125,
"learning_rate": 2.45e-07,
"loss": 0.3244,
"step": 1225
},
{
"epoch": 0.58,
"grad_norm": 21.650047302246094,
"learning_rate": 2.5e-07,
"loss": 0.341,
"step": 1250
},
{
"epoch": 0.6,
"grad_norm": 17.734304428100586,
"learning_rate": 2.55e-07,
"loss": 0.3598,
"step": 1275
},
{
"epoch": 0.61,
"grad_norm": 19.580183029174805,
"learning_rate": 2.6e-07,
"loss": 0.3312,
"step": 1300
},
{
"epoch": 0.62,
"grad_norm": 19.631921768188477,
"learning_rate": 2.65e-07,
"loss": 0.3442,
"step": 1325
},
{
"epoch": 0.63,
"grad_norm": 18.21005630493164,
"learning_rate": 2.7e-07,
"loss": 0.3318,
"step": 1350
},
{
"epoch": 0.64,
"grad_norm": 20.728309631347656,
"learning_rate": 2.75e-07,
"loss": 0.3255,
"step": 1375
},
{
"epoch": 0.65,
"grad_norm": 15.139528274536133,
"learning_rate": 2.8e-07,
"loss": 0.3195,
"step": 1400
},
{
"epoch": 0.67,
"grad_norm": 14.433816909790039,
"learning_rate": 2.8499999999999997e-07,
"loss": 0.2906,
"step": 1425
},
{
"epoch": 0.68,
"grad_norm": 17.25659942626953,
"learning_rate": 2.9e-07,
"loss": 0.3455,
"step": 1450
},
{
"epoch": 0.69,
"grad_norm": 21.09754180908203,
"learning_rate": 2.95e-07,
"loss": 0.3386,
"step": 1475
},
{
"epoch": 0.7,
"grad_norm": 15.790301322937012,
"learning_rate": 3e-07,
"loss": 0.3348,
"step": 1500
},
{
"epoch": 0.71,
"grad_norm": 23.314483642578125,
"learning_rate": 3.05e-07,
"loss": 0.3079,
"step": 1525
},
{
"epoch": 0.72,
"grad_norm": 16.656147003173828,
"learning_rate": 3.1e-07,
"loss": 0.3132,
"step": 1550
},
{
"epoch": 0.74,
"grad_norm": 13.165972709655762,
"learning_rate": 3.15e-07,
"loss": 0.2978,
"step": 1575
},
{
"epoch": 0.75,
"grad_norm": 18.286815643310547,
"learning_rate": 3.2e-07,
"loss": 0.3112,
"step": 1600
},
{
"epoch": 0.76,
"grad_norm": 16.560523986816406,
"learning_rate": 3.25e-07,
"loss": 0.3051,
"step": 1625
},
{
"epoch": 0.77,
"grad_norm": 18.13733673095703,
"learning_rate": 3.3e-07,
"loss": 0.3098,
"step": 1650
},
{
"epoch": 0.78,
"grad_norm": 19.807390213012695,
"learning_rate": 3.35e-07,
"loss": 0.2979,
"step": 1675
},
{
"epoch": 0.79,
"grad_norm": 15.216619491577148,
"learning_rate": 3.4000000000000003e-07,
"loss": 0.2979,
"step": 1700
},
{
"epoch": 0.81,
"grad_norm": 17.19868278503418,
"learning_rate": 3.45e-07,
"loss": 0.3039,
"step": 1725
},
{
"epoch": 0.82,
"grad_norm": 16.618858337402344,
"learning_rate": 3.5e-07,
"loss": 0.2991,
"step": 1750
},
{
"epoch": 0.83,
"grad_norm": 14.719141006469727,
"learning_rate": 3.55e-07,
"loss": 0.2914,
"step": 1775
},
{
"epoch": 0.84,
"grad_norm": 18.658220291137695,
"learning_rate": 3.6e-07,
"loss": 0.2904,
"step": 1800
},
{
"epoch": 0.85,
"grad_norm": 16.73942756652832,
"learning_rate": 3.65e-07,
"loss": 0.2933,
"step": 1825
},
{
"epoch": 0.86,
"grad_norm": 17.588727951049805,
"learning_rate": 3.7e-07,
"loss": 0.3185,
"step": 1850
},
{
"epoch": 0.88,
"grad_norm": 14.940112113952637,
"learning_rate": 3.75e-07,
"loss": 0.3014,
"step": 1875
},
{
"epoch": 0.89,
"grad_norm": 16.759981155395508,
"learning_rate": 3.7999999999999996e-07,
"loss": 0.29,
"step": 1900
},
{
"epoch": 0.9,
"grad_norm": 16.736419677734375,
"learning_rate": 3.8499999999999997e-07,
"loss": 0.3015,
"step": 1925
},
{
"epoch": 0.91,
"grad_norm": 13.355605125427246,
"learning_rate": 3.8999999999999997e-07,
"loss": 0.3035,
"step": 1950
},
{
"epoch": 0.92,
"grad_norm": 22.4385929107666,
"learning_rate": 3.95e-07,
"loss": 0.2983,
"step": 1975
},
{
"epoch": 0.93,
"grad_norm": 15.556875228881836,
"learning_rate": 4e-07,
"loss": 0.2942,
"step": 2000
},
{
"epoch": 0.93,
"eval_loss": 0.24996864795684814,
"eval_runtime": 1326.5807,
"eval_samples_per_second": 7.096,
"eval_steps_per_second": 0.444,
"eval_wer": 0.1626493397610564,
"step": 2000
},
{
"epoch": 0.95,
"grad_norm": 17.8362979888916,
"learning_rate": 4.05e-07,
"loss": 0.2912,
"step": 2025
},
{
"epoch": 0.96,
"grad_norm": 21.89351463317871,
"learning_rate": 4.0999999999999994e-07,
"loss": 0.3063,
"step": 2050
},
{
"epoch": 0.97,
"grad_norm": 13.810240745544434,
"learning_rate": 4.1499999999999994e-07,
"loss": 0.3019,
"step": 2075
},
{
"epoch": 0.98,
"grad_norm": 17.076282501220703,
"learning_rate": 4.1999999999999995e-07,
"loss": 0.3091,
"step": 2100
},
{
"epoch": 0.99,
"grad_norm": 15.350356101989746,
"learning_rate": 4.2499999999999995e-07,
"loss": 0.2994,
"step": 2125
},
{
"epoch": 1.0,
"grad_norm": 7.815631866455078,
"learning_rate": 4.2999999999999996e-07,
"loss": 0.2792,
"step": 2150
},
{
"epoch": 1.02,
"grad_norm": 8.397562026977539,
"learning_rate": 4.3499999999999996e-07,
"loss": 0.277,
"step": 2175
},
{
"epoch": 1.03,
"grad_norm": 7.092517852783203,
"learning_rate": 4.3999999999999997e-07,
"loss": 0.2732,
"step": 2200
},
{
"epoch": 1.04,
"grad_norm": 8.4801607131958,
"learning_rate": 4.45e-07,
"loss": 0.2876,
"step": 2225
},
{
"epoch": 1.05,
"grad_norm": 9.114388465881348,
"learning_rate": 4.5e-07,
"loss": 0.2702,
"step": 2250
},
{
"epoch": 1.06,
"grad_norm": 7.309689521789551,
"learning_rate": 4.55e-07,
"loss": 0.2994,
"step": 2275
},
{
"epoch": 1.07,
"grad_norm": 7.109182357788086,
"learning_rate": 4.6e-07,
"loss": 0.2752,
"step": 2300
},
{
"epoch": 1.09,
"grad_norm": 8.527280807495117,
"learning_rate": 4.65e-07,
"loss": 0.2493,
"step": 2325
},
{
"epoch": 1.1,
"grad_norm": 5.584944725036621,
"learning_rate": 4.6999999999999995e-07,
"loss": 0.2618,
"step": 2350
},
{
"epoch": 1.11,
"grad_norm": 7.848605632781982,
"learning_rate": 4.7499999999999995e-07,
"loss": 0.2748,
"step": 2375
},
{
"epoch": 1.12,
"grad_norm": 6.040729999542236,
"learning_rate": 4.8e-07,
"loss": 0.2736,
"step": 2400
},
{
"epoch": 1.13,
"grad_norm": 7.142736434936523,
"learning_rate": 4.85e-07,
"loss": 0.258,
"step": 2425
},
{
"epoch": 1.14,
"grad_norm": 7.892075061798096,
"learning_rate": 4.9e-07,
"loss": 0.3096,
"step": 2450
},
{
"epoch": 1.16,
"grad_norm": 8.93137264251709,
"learning_rate": 4.95e-07,
"loss": 0.2722,
"step": 2475
},
{
"epoch": 1.17,
"grad_norm": 8.166301727294922,
"learning_rate": 5e-07,
"loss": 0.2924,
"step": 2500
},
{
"epoch": 1.18,
"grad_norm": 6.910344123840332,
"learning_rate": 5.049999999999999e-07,
"loss": 0.2543,
"step": 2525
},
{
"epoch": 1.19,
"grad_norm": 9.456042289733887,
"learning_rate": 5.1e-07,
"loss": 0.29,
"step": 2550
},
{
"epoch": 1.2,
"grad_norm": 6.1278395652771,
"learning_rate": 5.149999999999999e-07,
"loss": 0.2752,
"step": 2575
},
{
"epoch": 1.21,
"grad_norm": 8.067246437072754,
"learning_rate": 5.2e-07,
"loss": 0.2821,
"step": 2600
},
{
"epoch": 1.23,
"grad_norm": 7.603886127471924,
"learning_rate": 5.25e-07,
"loss": 0.2838,
"step": 2625
},
{
"epoch": 1.24,
"grad_norm": 8.258223533630371,
"learning_rate": 5.3e-07,
"loss": 0.2736,
"step": 2650
},
{
"epoch": 1.25,
"grad_norm": 6.4209065437316895,
"learning_rate": 5.35e-07,
"loss": 0.2536,
"step": 2675
},
{
"epoch": 1.26,
"grad_norm": 7.871333122253418,
"learning_rate": 5.4e-07,
"loss": 0.2779,
"step": 2700
},
{
"epoch": 1.27,
"grad_norm": 7.8993306159973145,
"learning_rate": 5.45e-07,
"loss": 0.2718,
"step": 2725
},
{
"epoch": 1.28,
"grad_norm": 7.633549690246582,
"learning_rate": 5.5e-07,
"loss": 0.2635,
"step": 2750
},
{
"epoch": 1.3,
"grad_norm": 5.408315658569336,
"learning_rate": 5.55e-07,
"loss": 0.2602,
"step": 2775
},
{
"epoch": 1.31,
"grad_norm": 8.084305763244629,
"learning_rate": 5.6e-07,
"loss": 0.2495,
"step": 2800
},
{
"epoch": 1.32,
"grad_norm": 6.785496234893799,
"learning_rate": 5.649999999999999e-07,
"loss": 0.2797,
"step": 2825
},
{
"epoch": 1.33,
"grad_norm": 8.085886001586914,
"learning_rate": 5.699999999999999e-07,
"loss": 0.2562,
"step": 2850
},
{
"epoch": 1.34,
"grad_norm": 8.028220176696777,
"learning_rate": 5.749999999999999e-07,
"loss": 0.2534,
"step": 2875
},
{
"epoch": 1.35,
"grad_norm": 7.391637802124023,
"learning_rate": 5.8e-07,
"loss": 0.2794,
"step": 2900
},
{
"epoch": 1.37,
"grad_norm": 7.541261672973633,
"learning_rate": 5.849999999999999e-07,
"loss": 0.2776,
"step": 2925
},
{
"epoch": 1.38,
"grad_norm": 7.456998348236084,
"learning_rate": 5.9e-07,
"loss": 0.2883,
"step": 2950
},
{
"epoch": 1.39,
"grad_norm": 4.759747505187988,
"learning_rate": 5.949999999999999e-07,
"loss": 0.2468,
"step": 2975
},
{
"epoch": 1.4,
"grad_norm": 6.258363723754883,
"learning_rate": 6e-07,
"loss": 0.2877,
"step": 3000
},
{
"epoch": 1.4,
"eval_loss": 0.2336360216140747,
"eval_runtime": 1334.64,
"eval_samples_per_second": 7.054,
"eval_steps_per_second": 0.441,
"eval_wer": 0.15355593892588235,
"step": 3000
},
{
"epoch": 1.41,
"grad_norm": 6.2899651527404785,
"learning_rate": 6.049999999999999e-07,
"loss": 0.2729,
"step": 3025
},
{
"epoch": 1.42,
"grad_norm": 8.125208854675293,
"learning_rate": 6.1e-07,
"loss": 0.2598,
"step": 3050
},
{
"epoch": 1.44,
"grad_norm": 7.969179630279541,
"learning_rate": 6.149999999999999e-07,
"loss": 0.2401,
"step": 3075
},
{
"epoch": 1.45,
"grad_norm": 5.177332878112793,
"learning_rate": 6.2e-07,
"loss": 0.2491,
"step": 3100
},
{
"epoch": 1.46,
"grad_norm": 9.723845481872559,
"learning_rate": 6.249999999999999e-07,
"loss": 0.2651,
"step": 3125
},
{
"epoch": 1.47,
"grad_norm": 7.722317218780518,
"learning_rate": 6.3e-07,
"loss": 0.2669,
"step": 3150
},
{
"epoch": 1.48,
"grad_norm": 5.817755222320557,
"learning_rate": 6.35e-07,
"loss": 0.2632,
"step": 3175
},
{
"epoch": 1.49,
"grad_norm": 7.001987934112549,
"learning_rate": 6.4e-07,
"loss": 0.2419,
"step": 3200
},
{
"epoch": 1.51,
"grad_norm": 6.027250289916992,
"learning_rate": 6.45e-07,
"loss": 0.2975,
"step": 3225
},
{
"epoch": 1.52,
"grad_norm": 7.742289066314697,
"learning_rate": 6.5e-07,
"loss": 0.2574,
"step": 3250
},
{
"epoch": 1.53,
"grad_norm": 7.380095958709717,
"learning_rate": 6.55e-07,
"loss": 0.2447,
"step": 3275
},
{
"epoch": 1.54,
"grad_norm": 7.48017692565918,
"learning_rate": 6.6e-07,
"loss": 0.2903,
"step": 3300
},
{
"epoch": 1.55,
"grad_norm": 5.8539299964904785,
"learning_rate": 6.65e-07,
"loss": 0.2622,
"step": 3325
},
{
"epoch": 1.56,
"grad_norm": 5.69394063949585,
"learning_rate": 6.7e-07,
"loss": 0.2582,
"step": 3350
},
{
"epoch": 1.58,
"grad_norm": 7.406588077545166,
"learning_rate": 6.75e-07,
"loss": 0.2804,
"step": 3375
},
{
"epoch": 1.59,
"grad_norm": 5.877038955688477,
"learning_rate": 6.800000000000001e-07,
"loss": 0.2399,
"step": 3400
},
{
"epoch": 1.6,
"grad_norm": 6.2857184410095215,
"learning_rate": 6.85e-07,
"loss": 0.2732,
"step": 3425
},
{
"epoch": 1.61,
"grad_norm": 4.856884956359863,
"learning_rate": 6.9e-07,
"loss": 0.2459,
"step": 3450
},
{
"epoch": 1.62,
"grad_norm": 5.824467658996582,
"learning_rate": 6.949999999999999e-07,
"loss": 0.2487,
"step": 3475
},
{
"epoch": 1.63,
"grad_norm": 6.234816074371338,
"learning_rate": 7e-07,
"loss": 0.2594,
"step": 3500
},
{
"epoch": 1.65,
"grad_norm": 8.982284545898438,
"learning_rate": 7.049999999999999e-07,
"loss": 0.2624,
"step": 3525
},
{
"epoch": 1.66,
"grad_norm": 8.273755073547363,
"learning_rate": 7.1e-07,
"loss": 0.267,
"step": 3550
},
{
"epoch": 1.67,
"grad_norm": 4.855440616607666,
"learning_rate": 7.149999999999999e-07,
"loss": 0.2444,
"step": 3575
},
{
"epoch": 1.68,
"grad_norm": 7.118260383605957,
"learning_rate": 7.2e-07,
"loss": 0.2453,
"step": 3600
},
{
"epoch": 1.69,
"grad_norm": 7.056091785430908,
"learning_rate": 7.249999999999999e-07,
"loss": 0.2602,
"step": 3625
},
{
"epoch": 1.7,
"grad_norm": 8.34477424621582,
"learning_rate": 7.3e-07,
"loss": 0.2748,
"step": 3650
},
{
"epoch": 1.72,
"grad_norm": 6.903721809387207,
"learning_rate": 7.35e-07,
"loss": 0.2287,
"step": 3675
},
{
"epoch": 1.73,
"grad_norm": 6.3288092613220215,
"learning_rate": 7.4e-07,
"loss": 0.2604,
"step": 3700
},
{
"epoch": 1.74,
"grad_norm": 9.17168140411377,
"learning_rate": 7.45e-07,
"loss": 0.2772,
"step": 3725
},
{
"epoch": 1.75,
"grad_norm": 6.454259395599365,
"learning_rate": 7.5e-07,
"loss": 0.2862,
"step": 3750
},
{
"epoch": 1.76,
"grad_norm": 7.24948787689209,
"learning_rate": 7.55e-07,
"loss": 0.254,
"step": 3775
},
{
"epoch": 1.77,
"grad_norm": 6.764554977416992,
"learning_rate": 7.599999999999999e-07,
"loss": 0.2287,
"step": 3800
},
{
"epoch": 1.79,
"grad_norm": 7.178032875061035,
"learning_rate": 7.65e-07,
"loss": 0.2592,
"step": 3825
},
{
"epoch": 1.8,
"grad_norm": 5.957449436187744,
"learning_rate": 7.699999999999999e-07,
"loss": 0.2601,
"step": 3850
},
{
"epoch": 1.81,
"grad_norm": 6.306526184082031,
"learning_rate": 7.75e-07,
"loss": 0.2499,
"step": 3875
},
{
"epoch": 1.82,
"grad_norm": 6.489150524139404,
"learning_rate": 7.799999999999999e-07,
"loss": 0.2427,
"step": 3900
},
{
"epoch": 1.83,
"grad_norm": 8.371627807617188,
"learning_rate": 7.85e-07,
"loss": 0.2586,
"step": 3925
},
{
"epoch": 1.84,
"grad_norm": 6.425754547119141,
"learning_rate": 7.9e-07,
"loss": 0.2383,
"step": 3950
},
{
"epoch": 1.86,
"grad_norm": 8.453587532043457,
"learning_rate": 7.95e-07,
"loss": 0.2364,
"step": 3975
},
{
"epoch": 1.87,
"grad_norm": 5.8362717628479,
"learning_rate": 8e-07,
"loss": 0.2303,
"step": 4000
},
{
"epoch": 1.87,
"eval_loss": 0.22252435982227325,
"eval_runtime": 1357.5217,
"eval_samples_per_second": 6.935,
"eval_steps_per_second": 0.434,
"eval_wer": 0.14815471679860698,
"step": 4000
},
{
"epoch": 1.88,
"grad_norm": 7.4534735679626465,
"learning_rate": 8.05e-07,
"loss": 0.2451,
"step": 4025
},
{
"epoch": 1.89,
"grad_norm": 5.907018661499023,
"learning_rate": 8.1e-07,
"loss": 0.2392,
"step": 4050
},
{
"epoch": 1.9,
"grad_norm": 6.427994728088379,
"learning_rate": 8.149999999999999e-07,
"loss": 0.2318,
"step": 4075
},
{
"epoch": 1.91,
"grad_norm": 7.975613117218018,
"learning_rate": 8.199999999999999e-07,
"loss": 0.2368,
"step": 4100
},
{
"epoch": 1.93,
"grad_norm": 6.577622413635254,
"learning_rate": 8.249999999999999e-07,
"loss": 0.2458,
"step": 4125
},
{
"epoch": 1.94,
"grad_norm": 7.6984429359436035,
"learning_rate": 8.299999999999999e-07,
"loss": 0.2425,
"step": 4150
},
{
"epoch": 1.95,
"grad_norm": 6.880575180053711,
"learning_rate": 8.349999999999999e-07,
"loss": 0.2376,
"step": 4175
},
{
"epoch": 1.96,
"grad_norm": 8.09802532196045,
"learning_rate": 8.399999999999999e-07,
"loss": 0.2668,
"step": 4200
},
{
"epoch": 1.97,
"grad_norm": 5.784348011016846,
"learning_rate": 8.45e-07,
"loss": 0.2772,
"step": 4225
},
{
"epoch": 1.98,
"grad_norm": 6.168551921844482,
"learning_rate": 8.499999999999999e-07,
"loss": 0.2394,
"step": 4250
},
{
"epoch": 2.0,
"grad_norm": 8.498140335083008,
"learning_rate": 8.55e-07,
"loss": 0.2599,
"step": 4275
},
{
"epoch": 2.01,
"grad_norm": 9.183964729309082,
"learning_rate": 8.599999999999999e-07,
"loss": 0.234,
"step": 4300
},
{
"epoch": 2.02,
"grad_norm": 7.023620128631592,
"learning_rate": 8.65e-07,
"loss": 0.2208,
"step": 4325
},
{
"epoch": 2.03,
"grad_norm": 6.869289398193359,
"learning_rate": 8.699999999999999e-07,
"loss": 0.2199,
"step": 4350
},
{
"epoch": 2.04,
"grad_norm": 7.651854991912842,
"learning_rate": 8.75e-07,
"loss": 0.2153,
"step": 4375
},
{
"epoch": 2.05,
"grad_norm": 10.046488761901855,
"learning_rate": 8.799999999999999e-07,
"loss": 0.2511,
"step": 4400
},
{
"epoch": 2.07,
"grad_norm": 7.229065895080566,
"learning_rate": 8.85e-07,
"loss": 0.2172,
"step": 4425
},
{
"epoch": 2.08,
"grad_norm": 12.570436477661133,
"learning_rate": 8.9e-07,
"loss": 0.2315,
"step": 4450
},
{
"epoch": 2.09,
"grad_norm": 7.68054723739624,
"learning_rate": 8.95e-07,
"loss": 0.2073,
"step": 4475
},
{
"epoch": 2.1,
"grad_norm": 4.375064373016357,
"learning_rate": 9e-07,
"loss": 0.2313,
"step": 4500
},
{
"epoch": 2.11,
"grad_norm": 8.125436782836914,
"learning_rate": 9.05e-07,
"loss": 0.2324,
"step": 4525
},
{
"epoch": 2.12,
"grad_norm": 7.027578353881836,
"learning_rate": 9.1e-07,
"loss": 0.2251,
"step": 4550
},
{
"epoch": 2.14,
"grad_norm": 7.733719348907471,
"learning_rate": 9.15e-07,
"loss": 0.2192,
"step": 4575
},
{
"epoch": 2.15,
"grad_norm": 5.648609638214111,
"learning_rate": 9.2e-07,
"loss": 0.2274,
"step": 4600
},
{
"epoch": 2.16,
"grad_norm": 10.744574546813965,
"learning_rate": 9.25e-07,
"loss": 0.2301,
"step": 4625
},
{
"epoch": 2.17,
"grad_norm": 8.391999244689941,
"learning_rate": 9.3e-07,
"loss": 0.2348,
"step": 4650
},
{
"epoch": 2.18,
"grad_norm": 8.003060340881348,
"learning_rate": 9.35e-07,
"loss": 0.2147,
"step": 4675
},
{
"epoch": 2.19,
"grad_norm": 7.240922927856445,
"learning_rate": 9.399999999999999e-07,
"loss": 0.2177,
"step": 4700
},
{
"epoch": 2.21,
"grad_norm": 4.615699291229248,
"learning_rate": 9.45e-07,
"loss": 0.224,
"step": 4725
},
{
"epoch": 2.22,
"grad_norm": 5.88244104385376,
"learning_rate": 9.499999999999999e-07,
"loss": 0.2119,
"step": 4750
},
{
"epoch": 2.23,
"grad_norm": 8.743966102600098,
"learning_rate": 9.55e-07,
"loss": 0.2057,
"step": 4775
},
{
"epoch": 2.24,
"grad_norm": 6.3263325691223145,
"learning_rate": 9.6e-07,
"loss": 0.208,
"step": 4800
},
{
"epoch": 2.25,
"grad_norm": 8.424788475036621,
"learning_rate": 9.649999999999999e-07,
"loss": 0.2147,
"step": 4825
},
{
"epoch": 2.26,
"grad_norm": 10.14958381652832,
"learning_rate": 9.7e-07,
"loss": 0.2065,
"step": 4850
},
{
"epoch": 2.28,
"grad_norm": 7.147301197052002,
"learning_rate": 9.75e-07,
"loss": 0.2161,
"step": 4875
},
{
"epoch": 2.29,
"grad_norm": 7.870689868927002,
"learning_rate": 9.8e-07,
"loss": 0.2138,
"step": 4900
},
{
"epoch": 2.3,
"grad_norm": 8.061651229858398,
"learning_rate": 9.849999999999999e-07,
"loss": 0.2225,
"step": 4925
},
{
"epoch": 2.31,
"grad_norm": 9.418707847595215,
"learning_rate": 9.9e-07,
"loss": 0.2114,
"step": 4950
},
{
"epoch": 2.32,
"grad_norm": 7.523941516876221,
"learning_rate": 9.95e-07,
"loss": 0.2067,
"step": 4975
},
{
"epoch": 2.33,
"grad_norm": 10.119476318359375,
"learning_rate": 0.0,
"loss": 0.2192,
"step": 5000
},
{
"epoch": 2.33,
"eval_loss": 0.21544259786605835,
"eval_runtime": 1354.6965,
"eval_samples_per_second": 6.949,
"eval_steps_per_second": 0.435,
"eval_wer": 0.14422069232381535,
"step": 5000
},
{
"epoch": 2.33,
"step": 5000,
"total_flos": 2.308683219546931e+19,
"train_loss": 0.3779804196357727,
"train_runtime": 15838.189,
"train_samples_per_second": 5.051,
"train_steps_per_second": 0.316
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"total_flos": 2.308683219546931e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}