{
"best_metric": 18.301895430821354,
"best_model_checkpoint": "./checkpoint-2000",
"epoch": 39.119804400978,
"eval_steps": 1000,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.24,
"grad_norm": 39.86157989501953,
"learning_rate": 5.000000000000001e-07,
"loss": 2.0555,
"step": 25
},
{
"epoch": 0.49,
"grad_norm": Infinity,
"learning_rate": 9.800000000000001e-07,
"loss": 1.5219,
"step": 50
},
{
"epoch": 0.73,
"grad_norm": 6.197519779205322,
"learning_rate": 1.48e-06,
"loss": 1.0167,
"step": 75
},
{
"epoch": 0.98,
"grad_norm": 5.485505104064941,
"learning_rate": 1.98e-06,
"loss": 0.7299,
"step": 100
},
{
"epoch": 1.22,
"grad_norm": 5.534335613250732,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.6317,
"step": 125
},
{
"epoch": 1.47,
"grad_norm": 4.898209095001221,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.5503,
"step": 150
},
{
"epoch": 1.71,
"grad_norm": 5.0602946281433105,
"learning_rate": 3.48e-06,
"loss": 0.4998,
"step": 175
},
{
"epoch": 1.96,
"grad_norm": 4.8069305419921875,
"learning_rate": 3.980000000000001e-06,
"loss": 0.4457,
"step": 200
},
{
"epoch": 2.2,
"grad_norm": 4.571457386016846,
"learning_rate": 4.48e-06,
"loss": 0.3778,
"step": 225
},
{
"epoch": 2.44,
"grad_norm": 4.707397937774658,
"learning_rate": 4.980000000000001e-06,
"loss": 0.333,
"step": 250
},
{
"epoch": 2.69,
"grad_norm": 4.317914009094238,
"learning_rate": 5.480000000000001e-06,
"loss": 0.2859,
"step": 275
},
{
"epoch": 2.93,
"grad_norm": 2.6433582305908203,
"learning_rate": 5.98e-06,
"loss": 0.2229,
"step": 300
},
{
"epoch": 3.18,
"grad_norm": 2.5579586029052734,
"learning_rate": 6.480000000000001e-06,
"loss": 0.1767,
"step": 325
},
{
"epoch": 3.42,
"grad_norm": 2.2651941776275635,
"learning_rate": 6.98e-06,
"loss": 0.1474,
"step": 350
},
{
"epoch": 3.67,
"grad_norm": 2.528773546218872,
"learning_rate": 7.48e-06,
"loss": 0.1493,
"step": 375
},
{
"epoch": 3.91,
"grad_norm": 2.422232151031494,
"learning_rate": 7.980000000000002e-06,
"loss": 0.142,
"step": 400
},
{
"epoch": 4.16,
"grad_norm": 2.209630012512207,
"learning_rate": 8.48e-06,
"loss": 0.1126,
"step": 425
},
{
"epoch": 4.4,
"grad_norm": 1.9945831298828125,
"learning_rate": 8.98e-06,
"loss": 0.0909,
"step": 450
},
{
"epoch": 4.65,
"grad_norm": 1.8972020149230957,
"learning_rate": 9.48e-06,
"loss": 0.0892,
"step": 475
},
{
"epoch": 4.89,
"grad_norm": 2.345607042312622,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0909,
"step": 500
},
{
"epoch": 5.13,
"grad_norm": 1.3913277387619019,
"learning_rate": 9.946666666666667e-06,
"loss": 0.0713,
"step": 525
},
{
"epoch": 5.38,
"grad_norm": 2.5306901931762695,
"learning_rate": 9.891111111111113e-06,
"loss": 0.0535,
"step": 550
},
{
"epoch": 5.62,
"grad_norm": 1.3689407110214233,
"learning_rate": 9.835555555555556e-06,
"loss": 0.051,
"step": 575
},
{
"epoch": 5.87,
"grad_norm": 2.091756820678711,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0573,
"step": 600
},
{
"epoch": 6.11,
"grad_norm": 1.5179609060287476,
"learning_rate": 9.724444444444445e-06,
"loss": 0.0454,
"step": 625
},
{
"epoch": 6.36,
"grad_norm": 1.3439680337905884,
"learning_rate": 9.66888888888889e-06,
"loss": 0.029,
"step": 650
},
{
"epoch": 6.6,
"grad_norm": 1.279569387435913,
"learning_rate": 9.613333333333335e-06,
"loss": 0.032,
"step": 675
},
{
"epoch": 6.85,
"grad_norm": 1.6171939373016357,
"learning_rate": 9.557777777777777e-06,
"loss": 0.0314,
"step": 700
},
{
"epoch": 7.09,
"grad_norm": 0.8949939608573914,
"learning_rate": 9.502222222222223e-06,
"loss": 0.0245,
"step": 725
},
{
"epoch": 7.33,
"grad_norm": 1.3610639572143555,
"learning_rate": 9.446666666666667e-06,
"loss": 0.0165,
"step": 750
},
{
"epoch": 7.58,
"grad_norm": 1.5446442365646362,
"learning_rate": 9.391111111111111e-06,
"loss": 0.0169,
"step": 775
},
{
"epoch": 7.82,
"grad_norm": 1.2825671434402466,
"learning_rate": 9.335555555555557e-06,
"loss": 0.0193,
"step": 800
},
{
"epoch": 8.07,
"grad_norm": 0.7063258290290833,
"learning_rate": 9.280000000000001e-06,
"loss": 0.0154,
"step": 825
},
{
"epoch": 8.31,
"grad_norm": 0.72287917137146,
"learning_rate": 9.224444444444445e-06,
"loss": 0.0102,
"step": 850
},
{
"epoch": 8.56,
"grad_norm": 1.2877657413482666,
"learning_rate": 9.168888888888889e-06,
"loss": 0.0093,
"step": 875
},
{
"epoch": 8.8,
"grad_norm": 1.6262348890304565,
"learning_rate": 9.113333333333335e-06,
"loss": 0.0117,
"step": 900
},
{
"epoch": 9.05,
"grad_norm": 0.49374520778656006,
"learning_rate": 9.057777777777779e-06,
"loss": 0.0121,
"step": 925
},
{
"epoch": 9.29,
"grad_norm": 0.8290354013442993,
"learning_rate": 9.002222222222223e-06,
"loss": 0.0063,
"step": 950
},
{
"epoch": 9.54,
"grad_norm": 0.9974227547645569,
"learning_rate": 8.946666666666669e-06,
"loss": 0.006,
"step": 975
},
{
"epoch": 9.78,
"grad_norm": 0.8566415309906006,
"learning_rate": 8.891111111111111e-06,
"loss": 0.007,
"step": 1000
},
{
"epoch": 9.78,
"eval_loss": 0.41129249334335327,
"eval_runtime": 1450.197,
"eval_samples_per_second": 1.996,
"eval_steps_per_second": 0.499,
"eval_wer": 18.926838201629888,
"step": 1000
},
{
"epoch": 10.02,
"grad_norm": 1.7889363765716553,
"learning_rate": 8.835555555555557e-06,
"loss": 0.0067,
"step": 1025
},
{
"epoch": 10.27,
"grad_norm": 0.9269456267356873,
"learning_rate": 8.78e-06,
"loss": 0.0047,
"step": 1050
},
{
"epoch": 10.51,
"grad_norm": 0.7966364622116089,
"learning_rate": 8.724444444444445e-06,
"loss": 0.0047,
"step": 1075
},
{
"epoch": 10.76,
"grad_norm": 0.6406449675559998,
"learning_rate": 8.66888888888889e-06,
"loss": 0.0049,
"step": 1100
},
{
"epoch": 11.0,
"grad_norm": 0.4881477355957031,
"learning_rate": 8.613333333333333e-06,
"loss": 0.0049,
"step": 1125
},
{
"epoch": 11.25,
"grad_norm": 0.4843809902667999,
"learning_rate": 8.557777777777778e-06,
"loss": 0.0036,
"step": 1150
},
{
"epoch": 11.49,
"grad_norm": 0.8177527189254761,
"learning_rate": 8.502222222222223e-06,
"loss": 0.0028,
"step": 1175
},
{
"epoch": 11.74,
"grad_norm": 0.676511287689209,
"learning_rate": 8.446666666666668e-06,
"loss": 0.0031,
"step": 1200
},
{
"epoch": 11.98,
"grad_norm": 0.9199188351631165,
"learning_rate": 8.391111111111112e-06,
"loss": 0.003,
"step": 1225
},
{
"epoch": 12.22,
"grad_norm": 0.248287171125412,
"learning_rate": 8.335555555555556e-06,
"loss": 0.0024,
"step": 1250
},
{
"epoch": 12.47,
"grad_norm": 0.9288859963417053,
"learning_rate": 8.28e-06,
"loss": 0.0022,
"step": 1275
},
{
"epoch": 12.71,
"grad_norm": 0.8308430910110474,
"learning_rate": 8.224444444444444e-06,
"loss": 0.0025,
"step": 1300
},
{
"epoch": 12.96,
"grad_norm": 0.33064720034599304,
"learning_rate": 8.16888888888889e-06,
"loss": 0.0027,
"step": 1325
},
{
"epoch": 13.2,
"grad_norm": 0.5254776477813721,
"learning_rate": 8.113333333333334e-06,
"loss": 0.0019,
"step": 1350
},
{
"epoch": 13.45,
"grad_norm": 0.4681105315685272,
"learning_rate": 8.057777777777778e-06,
"loss": 0.0021,
"step": 1375
},
{
"epoch": 13.69,
"grad_norm": 0.352115273475647,
"learning_rate": 8.002222222222222e-06,
"loss": 0.0026,
"step": 1400
},
{
"epoch": 13.94,
"grad_norm": 1.5301597118377686,
"learning_rate": 7.946666666666666e-06,
"loss": 0.0027,
"step": 1425
},
{
"epoch": 14.18,
"grad_norm": 0.28577202558517456,
"learning_rate": 7.891111111111112e-06,
"loss": 0.0018,
"step": 1450
},
{
"epoch": 14.43,
"grad_norm": 0.7940084338188171,
"learning_rate": 7.835555555555556e-06,
"loss": 0.002,
"step": 1475
},
{
"epoch": 14.67,
"grad_norm": 1.031543493270874,
"learning_rate": 7.78e-06,
"loss": 0.0021,
"step": 1500
},
{
"epoch": 14.91,
"grad_norm": 0.5695396661758423,
"learning_rate": 7.724444444444446e-06,
"loss": 0.0016,
"step": 1525
},
{
"epoch": 15.16,
"grad_norm": 0.2285182625055313,
"learning_rate": 7.66888888888889e-06,
"loss": 0.0014,
"step": 1550
},
{
"epoch": 15.4,
"grad_norm": 0.40289613604545593,
"learning_rate": 7.613333333333334e-06,
"loss": 0.0017,
"step": 1575
},
{
"epoch": 15.65,
"grad_norm": 0.5758986473083496,
"learning_rate": 7.557777777777779e-06,
"loss": 0.0012,
"step": 1600
},
{
"epoch": 15.89,
"grad_norm": 0.5524174571037292,
"learning_rate": 7.502222222222223e-06,
"loss": 0.001,
"step": 1625
},
{
"epoch": 16.14,
"grad_norm": 0.7031832933425903,
"learning_rate": 7.446666666666668e-06,
"loss": 0.0015,
"step": 1650
},
{
"epoch": 16.38,
"grad_norm": 0.22649440169334412,
"learning_rate": 7.3911111111111125e-06,
"loss": 0.0012,
"step": 1675
},
{
"epoch": 16.63,
"grad_norm": 0.46751469373703003,
"learning_rate": 7.335555555555556e-06,
"loss": 0.0012,
"step": 1700
},
{
"epoch": 16.87,
"grad_norm": 0.3201611340045929,
"learning_rate": 7.280000000000001e-06,
"loss": 0.0009,
"step": 1725
},
{
"epoch": 17.11,
"grad_norm": 0.05176452174782753,
"learning_rate": 7.224444444444445e-06,
"loss": 0.0007,
"step": 1750
},
{
"epoch": 17.36,
"grad_norm": 0.5956466794013977,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.0007,
"step": 1775
},
{
"epoch": 17.6,
"grad_norm": 0.1542888581752777,
"learning_rate": 7.113333333333334e-06,
"loss": 0.001,
"step": 1800
},
{
"epoch": 17.85,
"grad_norm": 0.624476432800293,
"learning_rate": 7.057777777777778e-06,
"loss": 0.0016,
"step": 1825
},
{
"epoch": 18.09,
"grad_norm": 0.3069753050804138,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.0013,
"step": 1850
},
{
"epoch": 18.34,
"grad_norm": 0.3797595500946045,
"learning_rate": 6.946666666666667e-06,
"loss": 0.0018,
"step": 1875
},
{
"epoch": 18.58,
"grad_norm": 0.3482624590396881,
"learning_rate": 6.891111111111111e-06,
"loss": 0.0014,
"step": 1900
},
{
"epoch": 18.83,
"grad_norm": 0.05667097494006157,
"learning_rate": 6.835555555555556e-06,
"loss": 0.0008,
"step": 1925
},
{
"epoch": 19.07,
"grad_norm": 0.09759561717510223,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0011,
"step": 1950
},
{
"epoch": 19.32,
"grad_norm": 0.5752401947975159,
"learning_rate": 6.724444444444444e-06,
"loss": 0.0014,
"step": 1975
},
{
"epoch": 19.56,
"grad_norm": 0.1028558760881424,
"learning_rate": 6.668888888888889e-06,
"loss": 0.0009,
"step": 2000
},
{
"epoch": 19.56,
"eval_loss": 0.4927152395248413,
"eval_runtime": 1461.0654,
"eval_samples_per_second": 1.981,
"eval_steps_per_second": 0.496,
"eval_wer": 18.301895430821354,
"step": 2000
},
{
"epoch": 19.8,
"grad_norm": 0.28834694623947144,
"learning_rate": 6.613333333333334e-06,
"loss": 0.0012,
"step": 2025
},
{
"epoch": 20.05,
"grad_norm": 0.38117802143096924,
"learning_rate": 6.557777777777778e-06,
"loss": 0.0017,
"step": 2050
},
{
"epoch": 20.29,
"grad_norm": 0.6632546782493591,
"learning_rate": 6.502222222222223e-06,
"loss": 0.001,
"step": 2075
},
{
"epoch": 20.54,
"grad_norm": 0.32943013310432434,
"learning_rate": 6.446666666666668e-06,
"loss": 0.0017,
"step": 2100
},
{
"epoch": 20.78,
"grad_norm": 0.6536120772361755,
"learning_rate": 6.391111111111111e-06,
"loss": 0.001,
"step": 2125
},
{
"epoch": 21.03,
"grad_norm": 0.6113318204879761,
"learning_rate": 6.335555555555556e-06,
"loss": 0.0009,
"step": 2150
},
{
"epoch": 21.27,
"grad_norm": 0.24811410903930664,
"learning_rate": 6.280000000000001e-06,
"loss": 0.0012,
"step": 2175
},
{
"epoch": 21.52,
"grad_norm": 1.5321826934814453,
"learning_rate": 6.224444444444445e-06,
"loss": 0.0015,
"step": 2200
},
{
"epoch": 21.76,
"grad_norm": 0.6304628252983093,
"learning_rate": 6.16888888888889e-06,
"loss": 0.002,
"step": 2225
},
{
"epoch": 22.0,
"grad_norm": 0.4291835427284241,
"learning_rate": 6.113333333333333e-06,
"loss": 0.0017,
"step": 2250
},
{
"epoch": 22.25,
"grad_norm": 1.250555396080017,
"learning_rate": 6.057777777777778e-06,
"loss": 0.0014,
"step": 2275
},
{
"epoch": 22.49,
"grad_norm": 0.14119359850883484,
"learning_rate": 6.002222222222223e-06,
"loss": 0.0011,
"step": 2300
},
{
"epoch": 22.74,
"grad_norm": 0.6686920523643494,
"learning_rate": 5.946666666666668e-06,
"loss": 0.0013,
"step": 2325
},
{
"epoch": 22.98,
"grad_norm": 0.6255317330360413,
"learning_rate": 5.891111111111112e-06,
"loss": 0.001,
"step": 2350
},
{
"epoch": 23.23,
"grad_norm": 0.08858698606491089,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.0006,
"step": 2375
},
{
"epoch": 23.47,
"grad_norm": 0.08142836391925812,
"learning_rate": 5.78e-06,
"loss": 0.0005,
"step": 2400
},
{
"epoch": 23.72,
"grad_norm": 1.0621262788772583,
"learning_rate": 5.724444444444445e-06,
"loss": 0.0005,
"step": 2425
},
{
"epoch": 23.96,
"grad_norm": 0.04572203755378723,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.0004,
"step": 2450
},
{
"epoch": 24.21,
"grad_norm": 0.03155618906021118,
"learning_rate": 5.613333333333334e-06,
"loss": 0.0002,
"step": 2475
},
{
"epoch": 24.45,
"grad_norm": 0.03207828477025032,
"learning_rate": 5.557777777777778e-06,
"loss": 0.0003,
"step": 2500
},
{
"epoch": 24.69,
"grad_norm": 0.0497412383556366,
"learning_rate": 5.5022222222222224e-06,
"loss": 0.0003,
"step": 2525
},
{
"epoch": 24.94,
"grad_norm": 0.21509559452533722,
"learning_rate": 5.4466666666666665e-06,
"loss": 0.0004,
"step": 2550
},
{
"epoch": 25.18,
"grad_norm": 0.024903174489736557,
"learning_rate": 5.391111111111111e-06,
"loss": 0.0003,
"step": 2575
},
{
"epoch": 25.43,
"grad_norm": 0.02149089053273201,
"learning_rate": 5.335555555555556e-06,
"loss": 0.0003,
"step": 2600
},
{
"epoch": 25.67,
"grad_norm": 0.014418188482522964,
"learning_rate": 5.28e-06,
"loss": 0.0002,
"step": 2625
},
{
"epoch": 25.92,
"grad_norm": 0.018349435180425644,
"learning_rate": 5.224444444444445e-06,
"loss": 0.0002,
"step": 2650
},
{
"epoch": 26.16,
"grad_norm": 0.013832501135766506,
"learning_rate": 5.168888888888889e-06,
"loss": 0.0002,
"step": 2675
},
{
"epoch": 26.41,
"grad_norm": 0.012281795963644981,
"learning_rate": 5.113333333333333e-06,
"loss": 0.0001,
"step": 2700
},
{
"epoch": 26.65,
"grad_norm": 0.009847081266343594,
"learning_rate": 5.057777777777778e-06,
"loss": 0.0001,
"step": 2725
},
{
"epoch": 26.89,
"grad_norm": 0.020844602957367897,
"learning_rate": 5.002222222222223e-06,
"loss": 0.0002,
"step": 2750
},
{
"epoch": 27.14,
"grad_norm": 0.012561388313770294,
"learning_rate": 4.946666666666667e-06,
"loss": 0.0003,
"step": 2775
},
{
"epoch": 27.38,
"grad_norm": 0.0130954310297966,
"learning_rate": 4.891111111111111e-06,
"loss": 0.0002,
"step": 2800
},
{
"epoch": 27.63,
"grad_norm": 0.008950438350439072,
"learning_rate": 4.835555555555556e-06,
"loss": 0.0001,
"step": 2825
},
{
"epoch": 27.87,
"grad_norm": 0.007355996407568455,
"learning_rate": 4.78e-06,
"loss": 0.0001,
"step": 2850
},
{
"epoch": 28.12,
"grad_norm": 0.008460123091936111,
"learning_rate": 4.724444444444445e-06,
"loss": 0.0001,
"step": 2875
},
{
"epoch": 28.36,
"grad_norm": 0.007149124052375555,
"learning_rate": 4.66888888888889e-06,
"loss": 0.0001,
"step": 2900
},
{
"epoch": 28.61,
"grad_norm": 0.008197379298508167,
"learning_rate": 4.613333333333334e-06,
"loss": 0.0001,
"step": 2925
},
{
"epoch": 28.85,
"grad_norm": 0.00648567546159029,
"learning_rate": 4.557777777777778e-06,
"loss": 0.0001,
"step": 2950
},
{
"epoch": 29.1,
"grad_norm": 0.006952579598873854,
"learning_rate": 4.502222222222223e-06,
"loss": 0.0001,
"step": 2975
},
{
"epoch": 29.34,
"grad_norm": 0.009140390902757645,
"learning_rate": 4.446666666666667e-06,
"loss": 0.0001,
"step": 3000
},
{
"epoch": 29.34,
"eval_loss": 0.535641610622406,
"eval_runtime": 1473.7946,
"eval_samples_per_second": 1.964,
"eval_steps_per_second": 0.491,
"eval_wer": 18.395751304825566,
"step": 3000
},
{
"epoch": 29.58,
"grad_norm": 0.007503976579755545,
"learning_rate": 4.391111111111112e-06,
"loss": 0.0001,
"step": 3025
},
{
"epoch": 29.83,
"grad_norm": 0.005778305232524872,
"learning_rate": 4.3355555555555565e-06,
"loss": 0.0001,
"step": 3050
},
{
"epoch": 30.07,
"grad_norm": 0.007032153662294149,
"learning_rate": 4.2800000000000005e-06,
"loss": 0.0001,
"step": 3075
},
{
"epoch": 30.32,
"grad_norm": 0.005656179040670395,
"learning_rate": 4.2244444444444446e-06,
"loss": 0.0001,
"step": 3100
},
{
"epoch": 30.56,
"grad_norm": 0.006409101653844118,
"learning_rate": 4.168888888888889e-06,
"loss": 0.0001,
"step": 3125
},
{
"epoch": 30.81,
"grad_norm": 0.006126554682850838,
"learning_rate": 4.1133333333333335e-06,
"loss": 0.0001,
"step": 3150
},
{
"epoch": 31.05,
"grad_norm": 0.005458911880850792,
"learning_rate": 4.057777777777778e-06,
"loss": 0.0001,
"step": 3175
},
{
"epoch": 31.3,
"grad_norm": 0.005242755636572838,
"learning_rate": 4.002222222222222e-06,
"loss": 0.0001,
"step": 3200
},
{
"epoch": 31.54,
"grad_norm": 0.005507026333361864,
"learning_rate": 3.946666666666667e-06,
"loss": 0.0001,
"step": 3225
},
{
"epoch": 31.78,
"grad_norm": 0.006995031144469976,
"learning_rate": 3.891111111111111e-06,
"loss": 0.0001,
"step": 3250
},
{
"epoch": 32.03,
"grad_norm": 0.0054997107945382595,
"learning_rate": 3.835555555555555e-06,
"loss": 0.0001,
"step": 3275
},
{
"epoch": 32.27,
"grad_norm": 0.004706221166998148,
"learning_rate": 3.7800000000000002e-06,
"loss": 0.0001,
"step": 3300
},
{
"epoch": 32.52,
"grad_norm": 0.005071236286312342,
"learning_rate": 3.724444444444445e-06,
"loss": 0.0001,
"step": 3325
},
{
"epoch": 32.76,
"grad_norm": 0.005186586640775204,
"learning_rate": 3.668888888888889e-06,
"loss": 0.0001,
"step": 3350
},
{
"epoch": 33.01,
"grad_norm": 0.006826834753155708,
"learning_rate": 3.6133333333333336e-06,
"loss": 0.0001,
"step": 3375
},
{
"epoch": 33.25,
"grad_norm": 0.005360448732972145,
"learning_rate": 3.5577777777777785e-06,
"loss": 0.0001,
"step": 3400
},
{
"epoch": 33.5,
"grad_norm": 0.004380129277706146,
"learning_rate": 3.5022222222222225e-06,
"loss": 0.0001,
"step": 3425
},
{
"epoch": 33.74,
"grad_norm": 0.006299168337136507,
"learning_rate": 3.446666666666667e-06,
"loss": 0.0001,
"step": 3450
},
{
"epoch": 33.99,
"grad_norm": 0.006263605318963528,
"learning_rate": 3.391111111111111e-06,
"loss": 0.0001,
"step": 3475
},
{
"epoch": 34.23,
"grad_norm": 0.004375001415610313,
"learning_rate": 3.335555555555556e-06,
"loss": 0.0001,
"step": 3500
},
{
"epoch": 34.47,
"grad_norm": 0.004684335086494684,
"learning_rate": 3.2800000000000004e-06,
"loss": 0.0001,
"step": 3525
},
{
"epoch": 34.72,
"grad_norm": 0.004894171841442585,
"learning_rate": 3.2244444444444444e-06,
"loss": 0.0001,
"step": 3550
},
{
"epoch": 34.96,
"grad_norm": 0.005006886553019285,
"learning_rate": 3.1688888888888893e-06,
"loss": 0.0001,
"step": 3575
},
{
"epoch": 35.21,
"grad_norm": 0.004793678876012564,
"learning_rate": 3.1133333333333337e-06,
"loss": 0.0001,
"step": 3600
},
{
"epoch": 35.45,
"grad_norm": 0.0059304991737008095,
"learning_rate": 3.0577777777777778e-06,
"loss": 0.0001,
"step": 3625
},
{
"epoch": 35.7,
"grad_norm": 0.00519231241196394,
"learning_rate": 3.0022222222222227e-06,
"loss": 0.0001,
"step": 3650
},
{
"epoch": 35.94,
"grad_norm": 0.003882919903844595,
"learning_rate": 2.946666666666667e-06,
"loss": 0.0001,
"step": 3675
},
{
"epoch": 36.19,
"grad_norm": 0.004180469550192356,
"learning_rate": 2.891111111111111e-06,
"loss": 0.0001,
"step": 3700
},
{
"epoch": 36.43,
"grad_norm": 0.004309108946472406,
"learning_rate": 2.835555555555556e-06,
"loss": 0.0001,
"step": 3725
},
{
"epoch": 36.67,
"grad_norm": 0.004415574017912149,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.0001,
"step": 3750
},
{
"epoch": 36.92,
"grad_norm": 0.004281037952750921,
"learning_rate": 2.7244444444444445e-06,
"loss": 0.0001,
"step": 3775
},
{
"epoch": 37.16,
"grad_norm": 0.00387546862475574,
"learning_rate": 2.6688888888888894e-06,
"loss": 0.0001,
"step": 3800
},
{
"epoch": 37.41,
"grad_norm": 0.004122734069824219,
"learning_rate": 2.6133333333333334e-06,
"loss": 0.0001,
"step": 3825
},
{
"epoch": 37.65,
"grad_norm": 0.004634737502783537,
"learning_rate": 2.557777777777778e-06,
"loss": 0.0001,
"step": 3850
},
{
"epoch": 37.9,
"grad_norm": 0.003926947247236967,
"learning_rate": 2.5022222222222224e-06,
"loss": 0.0001,
"step": 3875
},
{
"epoch": 38.14,
"grad_norm": 0.0039747897535562515,
"learning_rate": 2.446666666666667e-06,
"loss": 0.0001,
"step": 3900
},
{
"epoch": 38.39,
"grad_norm": 0.003933820873498917,
"learning_rate": 2.3911111111111113e-06,
"loss": 0.0001,
"step": 3925
},
{
"epoch": 38.63,
"grad_norm": 0.0039012329652905464,
"learning_rate": 2.3355555555555557e-06,
"loss": 0.0001,
"step": 3950
},
{
"epoch": 38.88,
"grad_norm": 0.0034920210018754005,
"learning_rate": 2.28e-06,
"loss": 0.0001,
"step": 3975
},
{
"epoch": 39.12,
"grad_norm": 0.004213281441479921,
"learning_rate": 2.2244444444444447e-06,
"loss": 0.0001,
"step": 4000
},
{
"epoch": 39.12,
"eval_loss": 0.5614951848983765,
"eval_runtime": 1476.654,
"eval_samples_per_second": 1.96,
"eval_steps_per_second": 0.49,
"eval_wer": 18.48273967585386,
"step": 4000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 1000,
"total_flos": 7.383284315947008e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}