whis-small-bn / checkpoint-6000 / trainer_state.json
{
"best_metric": 44.81641468682505,
"best_model_checkpoint": "./whisper-small-bn/checkpoint-6000",
"epoch": 6.0,
"eval_steps": 1000,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"grad_norm": 27.802248001098633,
"learning_rate": 3.2e-07,
"loss": 2.2811,
"step": 20
},
{
"epoch": 0.13,
"grad_norm": 18.253421783447266,
"learning_rate": 7.2e-07,
"loss": 2.1964,
"step": 40
},
{
"epoch": 0.2,
"grad_norm": 13.585515975952148,
"learning_rate": 1.12e-06,
"loss": 1.8635,
"step": 60
},
{
"epoch": 0.27,
"grad_norm": 22.400362014770508,
"learning_rate": 1.52e-06,
"loss": 1.703,
"step": 80
},
{
"epoch": 0.33,
"grad_norm": 13.77535343170166,
"learning_rate": 1.9200000000000003e-06,
"loss": 1.5336,
"step": 100
},
{
"epoch": 0.4,
"grad_norm": 14.989215850830078,
"learning_rate": 2.3200000000000002e-06,
"loss": 1.4044,
"step": 120
},
{
"epoch": 0.47,
"grad_norm": 22.01858139038086,
"learning_rate": 2.7200000000000002e-06,
"loss": 1.2402,
"step": 140
},
{
"epoch": 0.53,
"grad_norm": 16.482311248779297,
"learning_rate": 3.12e-06,
"loss": 1.0922,
"step": 160
},
{
"epoch": 0.6,
"grad_norm": 17.529874801635742,
"learning_rate": 3.52e-06,
"loss": 0.9583,
"step": 180
},
{
"epoch": 0.67,
"grad_norm": 18.252761840820312,
"learning_rate": 3.920000000000001e-06,
"loss": 0.8783,
"step": 200
},
{
"epoch": 0.73,
"grad_norm": 16.48067283630371,
"learning_rate": 4.32e-06,
"loss": 0.7643,
"step": 220
},
{
"epoch": 0.8,
"grad_norm": 15.062904357910156,
"learning_rate": 4.7200000000000005e-06,
"loss": 0.6991,
"step": 240
},
{
"epoch": 0.87,
"grad_norm": 14.231403350830078,
"learning_rate": 5.12e-06,
"loss": 0.6594,
"step": 260
},
{
"epoch": 0.93,
"grad_norm": 18.425230026245117,
"learning_rate": 5.5200000000000005e-06,
"loss": 0.5724,
"step": 280
},
{
"epoch": 1.0,
"grad_norm": 11.786844253540039,
"learning_rate": 5.92e-06,
"loss": 0.5687,
"step": 300
},
{
"epoch": 1.07,
"grad_norm": 15.346015930175781,
"learning_rate": 6.3200000000000005e-06,
"loss": 0.4696,
"step": 320
},
{
"epoch": 1.13,
"grad_norm": 7.936849594116211,
"learning_rate": 6.720000000000001e-06,
"loss": 0.4569,
"step": 340
},
{
"epoch": 1.2,
"grad_norm": 13.894779205322266,
"learning_rate": 7.1200000000000004e-06,
"loss": 0.4089,
"step": 360
},
{
"epoch": 1.27,
"grad_norm": 11.665081977844238,
"learning_rate": 7.520000000000001e-06,
"loss": 0.4168,
"step": 380
},
{
"epoch": 1.33,
"grad_norm": 16.7696475982666,
"learning_rate": 7.92e-06,
"loss": 0.4433,
"step": 400
},
{
"epoch": 1.4,
"grad_norm": 15.60254192352295,
"learning_rate": 8.32e-06,
"loss": 0.4013,
"step": 420
},
{
"epoch": 1.47,
"grad_norm": 13.72472858428955,
"learning_rate": 8.720000000000001e-06,
"loss": 0.3903,
"step": 440
},
{
"epoch": 1.53,
"grad_norm": 10.506444931030273,
"learning_rate": 9.12e-06,
"loss": 0.3581,
"step": 460
},
{
"epoch": 1.6,
"grad_norm": 13.643900871276855,
"learning_rate": 9.52e-06,
"loss": 0.3605,
"step": 480
},
{
"epoch": 1.67,
"grad_norm": 11.669689178466797,
"learning_rate": 9.920000000000002e-06,
"loss": 0.3548,
"step": 500
},
{
"epoch": 1.73,
"grad_norm": 9.907474517822266,
"learning_rate": 9.936000000000001e-06,
"loss": 0.3535,
"step": 520
},
{
"epoch": 1.8,
"grad_norm": 11.366904258728027,
"learning_rate": 9.856000000000002e-06,
"loss": 0.2943,
"step": 540
},
{
"epoch": 1.87,
"grad_norm": 7.5114240646362305,
"learning_rate": 9.776000000000001e-06,
"loss": 0.3057,
"step": 560
},
{
"epoch": 1.93,
"grad_norm": 11.717443466186523,
"learning_rate": 9.696000000000002e-06,
"loss": 0.2919,
"step": 580
},
{
"epoch": 2.0,
"grad_norm": 9.275129318237305,
"learning_rate": 9.616e-06,
"loss": 0.3115,
"step": 600
},
{
"epoch": 2.07,
"grad_norm": 9.388382911682129,
"learning_rate": 9.536000000000002e-06,
"loss": 0.1926,
"step": 620
},
{
"epoch": 2.13,
"grad_norm": 8.574433326721191,
"learning_rate": 9.456e-06,
"loss": 0.2193,
"step": 640
},
{
"epoch": 2.2,
"grad_norm": 5.512338638305664,
"learning_rate": 9.376000000000001e-06,
"loss": 0.18,
"step": 660
},
{
"epoch": 2.27,
"grad_norm": 9.644317626953125,
"learning_rate": 9.296e-06,
"loss": 0.1916,
"step": 680
},
{
"epoch": 2.33,
"grad_norm": 9.321474075317383,
"learning_rate": 9.216000000000001e-06,
"loss": 0.2055,
"step": 700
},
{
"epoch": 2.4,
"grad_norm": 4.868476390838623,
"learning_rate": 9.136e-06,
"loss": 0.1843,
"step": 720
},
{
"epoch": 2.47,
"grad_norm": 6.592445373535156,
"learning_rate": 9.056000000000001e-06,
"loss": 0.1713,
"step": 740
},
{
"epoch": 2.53,
"grad_norm": 8.138116836547852,
"learning_rate": 8.976e-06,
"loss": 0.1792,
"step": 760
},
{
"epoch": 2.6,
"grad_norm": 9.782510757446289,
"learning_rate": 8.896000000000001e-06,
"loss": 0.1796,
"step": 780
},
{
"epoch": 2.67,
"grad_norm": 5.867671012878418,
"learning_rate": 8.816000000000002e-06,
"loss": 0.1652,
"step": 800
},
{
"epoch": 2.73,
"grad_norm": 6.637588977813721,
"learning_rate": 8.736e-06,
"loss": 0.1735,
"step": 820
},
{
"epoch": 2.8,
"grad_norm": 11.652266502380371,
"learning_rate": 8.656000000000001e-06,
"loss": 0.1734,
"step": 840
},
{
"epoch": 2.87,
"grad_norm": 6.051203727722168,
"learning_rate": 8.576e-06,
"loss": 0.1557,
"step": 860
},
{
"epoch": 2.93,
"grad_norm": 11.018319129943848,
"learning_rate": 8.496000000000001e-06,
"loss": 0.1668,
"step": 880
},
{
"epoch": 3.0,
"grad_norm": 7.878730297088623,
"learning_rate": 8.416e-06,
"loss": 0.1706,
"step": 900
},
{
"epoch": 3.07,
"grad_norm": 6.223515510559082,
"learning_rate": 8.336000000000001e-06,
"loss": 0.0917,
"step": 920
},
{
"epoch": 3.13,
"grad_norm": 7.449841022491455,
"learning_rate": 8.256e-06,
"loss": 0.0853,
"step": 940
},
{
"epoch": 3.2,
"grad_norm": 3.824934244155884,
"learning_rate": 8.176000000000001e-06,
"loss": 0.1033,
"step": 960
},
{
"epoch": 3.27,
"grad_norm": 6.369926929473877,
"learning_rate": 8.096e-06,
"loss": 0.0997,
"step": 980
},
{
"epoch": 3.33,
"grad_norm": 6.938141822814941,
"learning_rate": 8.016e-06,
"loss": 0.095,
"step": 1000
},
{
"epoch": 3.33,
"eval_loss": 0.27550819516181946,
"eval_runtime": 345.1723,
"eval_samples_per_second": 0.29,
"eval_steps_per_second": 0.29,
"eval_wer": 60.18099547511312,
"step": 1000
},
{
"epoch": 2.18,
"grad_norm": 7.869150638580322,
"learning_rate": 7.936e-06,
"loss": 0.1443,
"step": 1020
},
{
"epoch": 2.23,
"grad_norm": 3.6447396278381348,
"learning_rate": 7.860000000000001e-06,
"loss": 0.1974,
"step": 1040
},
{
"epoch": 2.27,
"grad_norm": 9.588157653808594,
"learning_rate": 7.78e-06,
"loss": 0.1427,
"step": 1060
},
{
"epoch": 2.31,
"grad_norm": 10.796669006347656,
"learning_rate": 7.7e-06,
"loss": 0.187,
"step": 1080
},
{
"epoch": 2.36,
"grad_norm": 4.941339015960693,
"learning_rate": 7.620000000000001e-06,
"loss": 0.1568,
"step": 1100
},
{
"epoch": 2.4,
"grad_norm": 11.114888191223145,
"learning_rate": 7.540000000000001e-06,
"loss": 0.1697,
"step": 1120
},
{
"epoch": 2.44,
"grad_norm": 10.200749397277832,
"learning_rate": 7.4600000000000006e-06,
"loss": 0.1675,
"step": 1140
},
{
"epoch": 2.48,
"grad_norm": 5.67927360534668,
"learning_rate": 7.3800000000000005e-06,
"loss": 0.1557,
"step": 1160
},
{
"epoch": 2.53,
"grad_norm": 7.413832664489746,
"learning_rate": 7.304000000000001e-06,
"loss": 0.1629,
"step": 1180
},
{
"epoch": 2.57,
"grad_norm": 6.933262348175049,
"learning_rate": 7.224000000000001e-06,
"loss": 0.17,
"step": 1200
},
{
"epoch": 2.61,
"grad_norm": 4.847285747528076,
"learning_rate": 7.1440000000000005e-06,
"loss": 0.1389,
"step": 1220
},
{
"epoch": 2.66,
"grad_norm": 19.55495834350586,
"learning_rate": 7.0640000000000005e-06,
"loss": 0.1338,
"step": 1240
},
{
"epoch": 2.7,
"grad_norm": 7.148222923278809,
"learning_rate": 6.984e-06,
"loss": 0.1296,
"step": 1260
},
{
"epoch": 2.74,
"grad_norm": 6.323164939880371,
"learning_rate": 6.904e-06,
"loss": 0.163,
"step": 1280
},
{
"epoch": 2.78,
"grad_norm": 9.096046447753906,
"learning_rate": 6.824e-06,
"loss": 0.1942,
"step": 1300
},
{
"epoch": 2.83,
"grad_norm": 8.462915420532227,
"learning_rate": 6.744e-06,
"loss": 0.1576,
"step": 1320
},
{
"epoch": 2.87,
"grad_norm": 7.848359107971191,
"learning_rate": 6.664e-06,
"loss": 0.1219,
"step": 1340
},
{
"epoch": 2.91,
"grad_norm": 6.30383825302124,
"learning_rate": 6.584e-06,
"loss": 0.1404,
"step": 1360
},
{
"epoch": 2.96,
"grad_norm": 5.657530784606934,
"learning_rate": 6.504e-06,
"loss": 0.1449,
"step": 1380
},
{
"epoch": 3.0,
"grad_norm": 7.4172234535217285,
"learning_rate": 6.424e-06,
"loss": 0.1234,
"step": 1400
},
{
"epoch": 3.04,
"grad_norm": 6.550841331481934,
"learning_rate": 6.344e-06,
"loss": 0.0885,
"step": 1420
},
{
"epoch": 3.08,
"grad_norm": 6.212351322174072,
"learning_rate": 6.264e-06,
"loss": 0.0725,
"step": 1440
},
{
"epoch": 3.13,
"grad_norm": 5.2562575340271,
"learning_rate": 6.184e-06,
"loss": 0.0886,
"step": 1460
},
{
"epoch": 3.17,
"grad_norm": 7.921740531921387,
"learning_rate": 6.104000000000001e-06,
"loss": 0.0805,
"step": 1480
},
{
"epoch": 3.21,
"grad_norm": 3.5878069400787354,
"learning_rate": 6.024000000000001e-06,
"loss": 0.0628,
"step": 1500
},
{
"epoch": 3.25,
"grad_norm": 8.585208892822266,
"learning_rate": 5.944000000000001e-06,
"loss": 0.0812,
"step": 1520
},
{
"epoch": 3.3,
"grad_norm": 4.111368656158447,
"learning_rate": 5.868e-06,
"loss": 0.0694,
"step": 1540
},
{
"epoch": 3.34,
"grad_norm": 5.304944038391113,
"learning_rate": 5.788e-06,
"loss": 0.0739,
"step": 1560
},
{
"epoch": 3.38,
"grad_norm": 6.168578147888184,
"learning_rate": 5.708e-06,
"loss": 0.0821,
"step": 1580
},
{
"epoch": 3.43,
"grad_norm": 12.2472505569458,
"learning_rate": 5.628e-06,
"loss": 0.0885,
"step": 1600
},
{
"epoch": 3.47,
"grad_norm": 9.349952697753906,
"learning_rate": 5.548e-06,
"loss": 0.0607,
"step": 1620
},
{
"epoch": 3.51,
"grad_norm": 5.98253870010376,
"learning_rate": 5.468e-06,
"loss": 0.0619,
"step": 1640
},
{
"epoch": 3.55,
"grad_norm": 7.106723785400391,
"learning_rate": 5.388e-06,
"loss": 0.0992,
"step": 1660
},
{
"epoch": 3.6,
"grad_norm": 3.693020820617676,
"learning_rate": 5.308000000000001e-06,
"loss": 0.0855,
"step": 1680
},
{
"epoch": 3.64,
"grad_norm": 7.3649678230285645,
"learning_rate": 5.228000000000001e-06,
"loss": 0.081,
"step": 1700
},
{
"epoch": 3.68,
"grad_norm": 5.4454240798950195,
"learning_rate": 5.1480000000000005e-06,
"loss": 0.0872,
"step": 1720
},
{
"epoch": 3.73,
"grad_norm": 8.854460716247559,
"learning_rate": 5.0680000000000004e-06,
"loss": 0.0869,
"step": 1740
},
{
"epoch": 3.77,
"grad_norm": 3.8615269660949707,
"learning_rate": 4.988e-06,
"loss": 0.0821,
"step": 1760
},
{
"epoch": 3.81,
"grad_norm": 3.7190287113189697,
"learning_rate": 4.908e-06,
"loss": 0.0636,
"step": 1780
},
{
"epoch": 3.85,
"grad_norm": 7.683879375457764,
"learning_rate": 4.828e-06,
"loss": 0.0892,
"step": 1800
},
{
"epoch": 3.9,
"grad_norm": 3.645141839981079,
"learning_rate": 4.748e-06,
"loss": 0.0803,
"step": 1820
},
{
"epoch": 3.94,
"grad_norm": 3.8167316913604736,
"learning_rate": 4.668e-06,
"loss": 0.1012,
"step": 1840
},
{
"epoch": 3.98,
"grad_norm": 7.247605323791504,
"learning_rate": 4.588e-06,
"loss": 0.0728,
"step": 1860
},
{
"epoch": 4.03,
"grad_norm": 3.5495615005493164,
"learning_rate": 4.508e-06,
"loss": 0.0439,
"step": 1880
},
{
"epoch": 4.07,
"grad_norm": 4.105050086975098,
"learning_rate": 4.428000000000001e-06,
"loss": 0.0357,
"step": 1900
},
{
"epoch": 4.11,
"grad_norm": 9.399913787841797,
"learning_rate": 4.3480000000000006e-06,
"loss": 0.0363,
"step": 1920
},
{
"epoch": 4.15,
"grad_norm": 3.0376510620117188,
"learning_rate": 4.2680000000000005e-06,
"loss": 0.0221,
"step": 1940
},
{
"epoch": 4.2,
"grad_norm": 0.6908012628555298,
"learning_rate": 4.188e-06,
"loss": 0.0323,
"step": 1960
},
{
"epoch": 4.24,
"grad_norm": 6.255786895751953,
"learning_rate": 4.108e-06,
"loss": 0.0327,
"step": 1980
},
{
"epoch": 4.28,
"grad_norm": 7.1336669921875,
"learning_rate": 4.028e-06,
"loss": 0.0416,
"step": 2000
},
{
"epoch": 4.28,
"eval_loss": 0.27948442101478577,
"eval_runtime": 349.9899,
"eval_samples_per_second": 0.286,
"eval_steps_per_second": 0.286,
"eval_wer": 59.0146750524109,
"step": 2000
},
{
"epoch": 1.68,
"grad_norm": 13.214738845825195,
"learning_rate": 7.250909090909092e-06,
"loss": 0.2839,
"step": 2020
},
{
"epoch": 1.7,
"grad_norm": 8.846616744995117,
"learning_rate": 7.214545454545455e-06,
"loss": 0.2221,
"step": 2040
},
{
"epoch": 1.72,
"grad_norm": 11.778766632080078,
"learning_rate": 7.178181818181818e-06,
"loss": 0.2394,
"step": 2060
},
{
"epoch": 1.73,
"grad_norm": 4.484768867492676,
"learning_rate": 7.141818181818182e-06,
"loss": 0.2299,
"step": 2080
},
{
"epoch": 1.75,
"grad_norm": 4.385435104370117,
"learning_rate": 7.105454545454546e-06,
"loss": 0.2221,
"step": 2100
},
{
"epoch": 1.77,
"grad_norm": 8.751326560974121,
"learning_rate": 7.06909090909091e-06,
"loss": 0.1966,
"step": 2120
},
{
"epoch": 1.78,
"grad_norm": 8.837333679199219,
"learning_rate": 7.032727272727273e-06,
"loss": 0.1893,
"step": 2140
},
{
"epoch": 1.8,
"grad_norm": 9.331002235412598,
"learning_rate": 6.998181818181818e-06,
"loss": 0.2365,
"step": 2160
},
{
"epoch": 1.82,
"grad_norm": 6.599325180053711,
"learning_rate": 6.961818181818183e-06,
"loss": 0.2152,
"step": 2180
},
{
"epoch": 1.83,
"grad_norm": 11.716184616088867,
"learning_rate": 6.9254545454545464e-06,
"loss": 0.203,
"step": 2200
},
{
"epoch": 1.85,
"grad_norm": 6.506683349609375,
"learning_rate": 6.88909090909091e-06,
"loss": 0.2162,
"step": 2220
},
{
"epoch": 1.87,
"grad_norm": 4.64941930770874,
"learning_rate": 6.852727272727273e-06,
"loss": 0.214,
"step": 2240
},
{
"epoch": 1.88,
"grad_norm": 8.344452857971191,
"learning_rate": 6.816363636363637e-06,
"loss": 0.2106,
"step": 2260
},
{
"epoch": 1.9,
"grad_norm": 5.504006385803223,
"learning_rate": 6.780000000000001e-06,
"loss": 0.196,
"step": 2280
},
{
"epoch": 1.92,
"grad_norm": 9.84432601928711,
"learning_rate": 6.743636363636365e-06,
"loss": 0.2207,
"step": 2300
},
{
"epoch": 1.93,
"grad_norm": 7.112894535064697,
"learning_rate": 6.707272727272728e-06,
"loss": 0.2072,
"step": 2320
},
{
"epoch": 1.95,
"grad_norm": 7.10872745513916,
"learning_rate": 6.670909090909091e-06,
"loss": 0.1925,
"step": 2340
},
{
"epoch": 1.97,
"grad_norm": 8.612652778625488,
"learning_rate": 6.634545454545455e-06,
"loss": 0.2023,
"step": 2360
},
{
"epoch": 1.98,
"grad_norm": 6.083488941192627,
"learning_rate": 6.5981818181818195e-06,
"loss": 0.2111,
"step": 2380
},
{
"epoch": 2.0,
"grad_norm": 7.59145975112915,
"learning_rate": 6.561818181818182e-06,
"loss": 0.1934,
"step": 2400
},
{
"epoch": 2.02,
"grad_norm": 6.529219627380371,
"learning_rate": 6.525454545454546e-06,
"loss": 0.1756,
"step": 2420
},
{
"epoch": 2.03,
"grad_norm": 13.685791015625,
"learning_rate": 6.48909090909091e-06,
"loss": 0.1872,
"step": 2440
},
{
"epoch": 2.05,
"grad_norm": 6.508156776428223,
"learning_rate": 6.4527272727272725e-06,
"loss": 0.1424,
"step": 2460
},
{
"epoch": 2.07,
"grad_norm": 4.847611427307129,
"learning_rate": 6.416363636363637e-06,
"loss": 0.1588,
"step": 2480
},
{
"epoch": 2.08,
"grad_norm": 5.715017795562744,
"learning_rate": 6.380000000000001e-06,
"loss": 0.1896,
"step": 2500
},
{
"epoch": 2.1,
"grad_norm": 10.562045097351074,
"learning_rate": 6.3436363636363644e-06,
"loss": 0.2007,
"step": 2520
},
{
"epoch": 2.12,
"grad_norm": 3.2870736122131348,
"learning_rate": 6.307272727272727e-06,
"loss": 0.179,
"step": 2540
},
{
"epoch": 2.13,
"grad_norm": 11.021241188049316,
"learning_rate": 6.270909090909092e-06,
"loss": 0.1573,
"step": 2560
},
{
"epoch": 2.15,
"grad_norm": 7.566544055938721,
"learning_rate": 6.2345454545454555e-06,
"loss": 0.1832,
"step": 2580
},
{
"epoch": 2.17,
"grad_norm": 5.980641841888428,
"learning_rate": 6.198181818181819e-06,
"loss": 0.1498,
"step": 2600
},
{
"epoch": 2.18,
"grad_norm": 5.697175025939941,
"learning_rate": 6.161818181818182e-06,
"loss": 0.1378,
"step": 2620
},
{
"epoch": 2.2,
"grad_norm": 4.451406002044678,
"learning_rate": 6.125454545454546e-06,
"loss": 0.152,
"step": 2640
},
{
"epoch": 2.22,
"grad_norm": 5.854424476623535,
"learning_rate": 6.08909090909091e-06,
"loss": 0.1492,
"step": 2660
},
{
"epoch": 2.23,
"grad_norm": 6.480546951293945,
"learning_rate": 6.052727272727274e-06,
"loss": 0.1501,
"step": 2680
},
{
"epoch": 2.25,
"grad_norm": 3.3347442150115967,
"learning_rate": 6.016363636363637e-06,
"loss": 0.132,
"step": 2700
},
{
"epoch": 2.27,
"grad_norm": 5.9716081619262695,
"learning_rate": 5.98e-06,
"loss": 0.1483,
"step": 2720
},
{
"epoch": 2.28,
"grad_norm": 5.842201232910156,
"learning_rate": 5.943636363636364e-06,
"loss": 0.1688,
"step": 2740
},
{
"epoch": 2.3,
"grad_norm": 5.079952716827393,
"learning_rate": 5.9072727272727285e-06,
"loss": 0.167,
"step": 2760
},
{
"epoch": 2.32,
"grad_norm": 9.07481575012207,
"learning_rate": 5.870909090909091e-06,
"loss": 0.1581,
"step": 2780
},
{
"epoch": 2.33,
"grad_norm": 9.30324935913086,
"learning_rate": 5.834545454545455e-06,
"loss": 0.1704,
"step": 2800
},
{
"epoch": 2.35,
"grad_norm": 4.848841667175293,
"learning_rate": 5.798181818181819e-06,
"loss": 0.1894,
"step": 2820
},
{
"epoch": 2.37,
"grad_norm": 7.52855110168457,
"learning_rate": 5.7618181818181816e-06,
"loss": 0.1702,
"step": 2840
},
{
"epoch": 2.38,
"grad_norm": 6.624313831329346,
"learning_rate": 5.725454545454546e-06,
"loss": 0.1628,
"step": 2860
},
{
"epoch": 2.4,
"grad_norm": 6.889793872833252,
"learning_rate": 5.68909090909091e-06,
"loss": 0.1705,
"step": 2880
},
{
"epoch": 2.42,
"grad_norm": 5.635292053222656,
"learning_rate": 5.6527272727272734e-06,
"loss": 0.1531,
"step": 2900
},
{
"epoch": 2.43,
"grad_norm": 4.1545729637146,
"learning_rate": 5.616363636363636e-06,
"loss": 0.1371,
"step": 2920
},
{
"epoch": 2.45,
"grad_norm": 7.1985697746276855,
"learning_rate": 5.580000000000001e-06,
"loss": 0.1571,
"step": 2940
},
{
"epoch": 2.47,
"grad_norm": 7.112518310546875,
"learning_rate": 5.5436363636363645e-06,
"loss": 0.1525,
"step": 2960
},
{
"epoch": 2.48,
"grad_norm": 9.503728866577148,
"learning_rate": 5.507272727272728e-06,
"loss": 0.1426,
"step": 2980
},
{
"epoch": 2.5,
"grad_norm": 5.497040271759033,
"learning_rate": 5.470909090909091e-06,
"loss": 0.1501,
"step": 3000
},
{
"epoch": 2.5,
"eval_loss": 0.2098405510187149,
"eval_runtime": 325.0771,
"eval_samples_per_second": 0.308,
"eval_steps_per_second": 0.308,
"eval_wer": 55.5672268907563,
"step": 3000
},
{
"epoch": 2.52,
"grad_norm": 6.7488603591918945,
"learning_rate": 5.434545454545455e-06,
"loss": 0.1647,
"step": 3020
},
{
"epoch": 2.53,
"grad_norm": 5.16122579574585,
"learning_rate": 5.398181818181819e-06,
"loss": 0.1617,
"step": 3040
},
{
"epoch": 2.55,
"grad_norm": 4.19830322265625,
"learning_rate": 5.361818181818183e-06,
"loss": 0.1484,
"step": 3060
},
{
"epoch": 2.57,
"grad_norm": 9.339705467224121,
"learning_rate": 5.325454545454546e-06,
"loss": 0.153,
"step": 3080
},
{
"epoch": 2.58,
"grad_norm": 5.106137752532959,
"learning_rate": 5.289090909090909e-06,
"loss": 0.141,
"step": 3100
},
{
"epoch": 2.6,
"grad_norm": 8.092510223388672,
"learning_rate": 5.252727272727273e-06,
"loss": 0.1443,
"step": 3120
},
{
"epoch": 2.62,
"grad_norm": 5.475297451019287,
"learning_rate": 5.2163636363636376e-06,
"loss": 0.1638,
"step": 3140
},
{
"epoch": 2.63,
"grad_norm": 8.545796394348145,
"learning_rate": 5.18e-06,
"loss": 0.1382,
"step": 3160
},
{
"epoch": 2.65,
"grad_norm": 5.189711093902588,
"learning_rate": 5.143636363636364e-06,
"loss": 0.1492,
"step": 3180
},
{
"epoch": 2.67,
"grad_norm": 7.558894157409668,
"learning_rate": 5.107272727272728e-06,
"loss": 0.1559,
"step": 3200
},
{
"epoch": 2.68,
"grad_norm": 5.985530853271484,
"learning_rate": 5.0709090909090906e-06,
"loss": 0.1605,
"step": 3220
},
{
"epoch": 2.7,
"grad_norm": 6.780598163604736,
"learning_rate": 5.034545454545455e-06,
"loss": 0.148,
"step": 3240
},
{
"epoch": 2.72,
"grad_norm": 6.225440979003906,
"learning_rate": 4.998181818181819e-06,
"loss": 0.1451,
"step": 3260
},
{
"epoch": 2.73,
"grad_norm": 5.706241607666016,
"learning_rate": 4.9618181818181824e-06,
"loss": 0.1814,
"step": 3280
},
{
"epoch": 2.75,
"grad_norm": 7.860342502593994,
"learning_rate": 4.925454545454546e-06,
"loss": 0.1407,
"step": 3300
},
{
"epoch": 2.77,
"grad_norm": 4.965442180633545,
"learning_rate": 4.88909090909091e-06,
"loss": 0.1494,
"step": 3320
},
{
"epoch": 2.78,
"grad_norm": 9.562829971313477,
"learning_rate": 4.8527272727272735e-06,
"loss": 0.1577,
"step": 3340
},
{
"epoch": 2.8,
"grad_norm": 6.261291027069092,
"learning_rate": 4.816363636363637e-06,
"loss": 0.1561,
"step": 3360
},
{
"epoch": 2.82,
"grad_norm": 5.747939109802246,
"learning_rate": 4.78e-06,
"loss": 0.1344,
"step": 3380
},
{
"epoch": 2.83,
"grad_norm": 8.154770851135254,
"learning_rate": 4.7436363636363645e-06,
"loss": 0.1419,
"step": 3400
},
{
"epoch": 2.85,
"grad_norm": 5.976426601409912,
"learning_rate": 4.707272727272727e-06,
"loss": 0.1314,
"step": 3420
},
{
"epoch": 2.87,
"grad_norm": 4.834322452545166,
"learning_rate": 4.670909090909092e-06,
"loss": 0.1128,
"step": 3440
},
{
"epoch": 2.88,
"grad_norm": 8.85062026977539,
"learning_rate": 4.634545454545455e-06,
"loss": 0.1718,
"step": 3460
},
{
"epoch": 2.9,
"grad_norm": 3.992954969406128,
"learning_rate": 4.598181818181818e-06,
"loss": 0.1341,
"step": 3480
},
{
"epoch": 2.92,
"grad_norm": 6.488000392913818,
"learning_rate": 4.561818181818182e-06,
"loss": 0.1401,
"step": 3500
},
{
"epoch": 2.93,
"grad_norm": 4.876968860626221,
"learning_rate": 4.525454545454546e-06,
"loss": 0.1319,
"step": 3520
},
{
"epoch": 2.95,
"grad_norm": 7.407408237457275,
"learning_rate": 4.489090909090909e-06,
"loss": 0.1377,
"step": 3540
},
{
"epoch": 2.97,
"grad_norm": 5.017177104949951,
"learning_rate": 4.452727272727273e-06,
"loss": 0.1732,
"step": 3560
},
{
"epoch": 2.98,
"grad_norm": 8.190740585327148,
"learning_rate": 4.416363636363637e-06,
"loss": 0.1225,
"step": 3580
},
{
"epoch": 3.0,
"grad_norm": 7.947383403778076,
"learning_rate": 4.38e-06,
"loss": 0.1454,
"step": 3600
},
{
"epoch": 3.02,
"grad_norm": 5.404466152191162,
"learning_rate": 4.343636363636364e-06,
"loss": 0.0746,
"step": 3620
},
{
"epoch": 3.03,
"grad_norm": 5.470146179199219,
"learning_rate": 4.307272727272728e-06,
"loss": 0.0725,
"step": 3640
},
{
"epoch": 3.05,
"grad_norm": 4.404048919677734,
"learning_rate": 4.2709090909090914e-06,
"loss": 0.088,
"step": 3660
},
{
"epoch": 3.07,
"grad_norm": 3.169548273086548,
"learning_rate": 4.234545454545455e-06,
"loss": 0.0811,
"step": 3680
},
{
"epoch": 3.08,
"grad_norm": 2.710052967071533,
"learning_rate": 4.198181818181819e-06,
"loss": 0.0646,
"step": 3700
},
{
"epoch": 3.1,
"grad_norm": 5.970617294311523,
"learning_rate": 4.1618181818181825e-06,
"loss": 0.0789,
"step": 3720
},
{
"epoch": 3.12,
"grad_norm": 4.657149791717529,
"learning_rate": 4.125454545454546e-06,
"loss": 0.0814,
"step": 3740
},
{
"epoch": 3.13,
"grad_norm": 4.4107666015625,
"learning_rate": 4.089090909090909e-06,
"loss": 0.0831,
"step": 3760
},
{
"epoch": 3.15,
"grad_norm": 6.185523509979248,
"learning_rate": 4.0527272727272735e-06,
"loss": 0.0672,
"step": 3780
},
{
"epoch": 3.17,
"grad_norm": 3.5439445972442627,
"learning_rate": 4.016363636363636e-06,
"loss": 0.0839,
"step": 3800
},
{
"epoch": 3.18,
"grad_norm": 5.79164981842041,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0692,
"step": 3820
},
{
"epoch": 3.2,
"grad_norm": 6.883912086486816,
"learning_rate": 3.943636363636364e-06,
"loss": 0.0798,
"step": 3840
},
{
"epoch": 3.22,
"grad_norm": 4.028432846069336,
"learning_rate": 3.907272727272727e-06,
"loss": 0.0808,
"step": 3860
},
{
"epoch": 3.23,
"grad_norm": 4.477746486663818,
"learning_rate": 3.870909090909091e-06,
"loss": 0.0781,
"step": 3880
},
{
"epoch": 3.25,
"grad_norm": 2.0900027751922607,
"learning_rate": 3.834545454545455e-06,
"loss": 0.073,
"step": 3900
},
{
"epoch": 3.27,
"grad_norm": 2.609961748123169,
"learning_rate": 3.798181818181819e-06,
"loss": 0.0961,
"step": 3920
},
{
"epoch": 3.28,
"grad_norm": 6.023408889770508,
"learning_rate": 3.761818181818182e-06,
"loss": 0.0806,
"step": 3940
},
{
"epoch": 3.3,
"grad_norm": 5.091029167175293,
"learning_rate": 3.725454545454546e-06,
"loss": 0.0681,
"step": 3960
},
{
"epoch": 3.32,
"grad_norm": 5.474444389343262,
"learning_rate": 3.6890909090909094e-06,
"loss": 0.0817,
"step": 3980
},
{
"epoch": 3.33,
"grad_norm": 5.174444198608398,
"learning_rate": 3.6527272727272727e-06,
"loss": 0.0827,
"step": 4000
},
{
"epoch": 3.33,
"eval_loss": 0.20179511606693268,
"eval_runtime": 319.617,
"eval_samples_per_second": 0.313,
"eval_steps_per_second": 0.313,
"eval_wer": 51.78571428571429,
"step": 4000
},
{
"epoch": 3.35,
"grad_norm": 6.867219924926758,
"learning_rate": 3.6163636363636368e-06,
"loss": 0.0721,
"step": 4020
},
{
"epoch": 3.37,
"grad_norm": 3.8965790271759033,
"learning_rate": 3.58e-06,
"loss": 0.0912,
"step": 4040
},
{
"epoch": 3.38,
"grad_norm": 5.221298694610596,
"learning_rate": 3.543636363636364e-06,
"loss": 0.0674,
"step": 4060
},
{
"epoch": 3.4,
"grad_norm": 6.95012092590332,
"learning_rate": 3.5072727272727274e-06,
"loss": 0.098,
"step": 4080
},
{
"epoch": 3.42,
"grad_norm": 8.342905044555664,
"learning_rate": 3.4709090909090915e-06,
"loss": 0.0741,
"step": 4100
},
{
"epoch": 3.43,
"grad_norm": 4.47432804107666,
"learning_rate": 3.4345454545454547e-06,
"loss": 0.0743,
"step": 4120
},
{
"epoch": 3.45,
"grad_norm": 4.369394779205322,
"learning_rate": 3.3981818181818184e-06,
"loss": 0.0728,
"step": 4140
},
{
"epoch": 3.47,
"grad_norm": 4.1392035484313965,
"learning_rate": 3.361818181818182e-06,
"loss": 0.0576,
"step": 4160
},
{
"epoch": 3.48,
"grad_norm": 8.901311874389648,
"learning_rate": 3.3254545454545458e-06,
"loss": 0.0723,
"step": 4180
},
{
"epoch": 3.5,
"grad_norm": 10.933469772338867,
"learning_rate": 3.2890909090909094e-06,
"loss": 0.0846,
"step": 4200
},
{
"epoch": 3.52,
"grad_norm": 2.7586188316345215,
"learning_rate": 3.252727272727273e-06,
"loss": 0.0724,
"step": 4220
},
{
"epoch": 3.53,
"grad_norm": 6.231330871582031,
"learning_rate": 3.2163636363636364e-06,
"loss": 0.0833,
"step": 4240
},
{
"epoch": 3.55,
"grad_norm": 6.514349460601807,
"learning_rate": 3.1800000000000005e-06,
"loss": 0.0693,
"step": 4260
},
{
"epoch": 3.57,
"grad_norm": 6.608529090881348,
"learning_rate": 3.1436363636363637e-06,
"loss": 0.0839,
"step": 4280
},
{
"epoch": 3.58,
"grad_norm": 4.276981353759766,
"learning_rate": 3.107272727272728e-06,
"loss": 0.0783,
"step": 4300
},
{
"epoch": 3.6,
"grad_norm": 3.6309165954589844,
"learning_rate": 3.070909090909091e-06,
"loss": 0.0704,
"step": 4320
},
{
"epoch": 3.62,
"grad_norm": 4.466903209686279,
"learning_rate": 3.034545454545455e-06,
"loss": 0.0837,
"step": 4340
},
{
"epoch": 3.63,
"grad_norm": 3.0481879711151123,
"learning_rate": 2.9981818181818184e-06,
"loss": 0.0789,
"step": 4360
},
{
"epoch": 3.65,
"grad_norm": 4.183527946472168,
"learning_rate": 2.9618181818181817e-06,
"loss": 0.0679,
"step": 4380
},
{
"epoch": 3.67,
"grad_norm": 7.014336585998535,
"learning_rate": 2.9254545454545458e-06,
"loss": 0.0754,
"step": 4400
},
{
"epoch": 3.68,
"grad_norm": 3.5609641075134277,
"learning_rate": 2.889090909090909e-06,
"loss": 0.0645,
"step": 4420
},
{
"epoch": 3.7,
"grad_norm": 7.09955358505249,
"learning_rate": 2.852727272727273e-06,
"loss": 0.0582,
"step": 4440
},
{
"epoch": 3.72,
"grad_norm": 5.648538589477539,
"learning_rate": 2.8163636363636364e-06,
"loss": 0.0741,
"step": 4460
},
{
"epoch": 3.73,
"grad_norm": 4.694089889526367,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.0952,
"step": 4480
},
{
"epoch": 3.75,
"grad_norm": 5.376272201538086,
"learning_rate": 2.7436363636363637e-06,
"loss": 0.0709,
"step": 4500
},
{
"epoch": 3.77,
"grad_norm": 4.306270599365234,
"learning_rate": 2.7072727272727274e-06,
"loss": 0.0833,
"step": 4520
},
{
"epoch": 3.78,
"grad_norm": 2.599487066268921,
"learning_rate": 2.670909090909091e-06,
"loss": 0.0794,
"step": 4540
},
{
"epoch": 3.8,
"grad_norm": 4.195871353149414,
"learning_rate": 2.6345454545454548e-06,
"loss": 0.0833,
"step": 4560
},
{
"epoch": 3.82,
"grad_norm": 5.1401543617248535,
"learning_rate": 2.5981818181818184e-06,
"loss": 0.0585,
"step": 4580
},
{
"epoch": 3.83,
"grad_norm": 8.566014289855957,
"learning_rate": 2.561818181818182e-06,
"loss": 0.0821,
"step": 4600
},
{
"epoch": 3.85,
"grad_norm": 4.7075419425964355,
"learning_rate": 2.525454545454546e-06,
"loss": 0.0791,
"step": 4620
},
{
"epoch": 3.87,
"grad_norm": 6.129905700683594,
"learning_rate": 2.4890909090909095e-06,
"loss": 0.0779,
"step": 4640
},
{
"epoch": 3.88,
"grad_norm": 3.7267708778381348,
"learning_rate": 2.452727272727273e-06,
"loss": 0.0612,
"step": 4660
},
{
"epoch": 3.9,
"grad_norm": 4.743310928344727,
"learning_rate": 2.416363636363637e-06,
"loss": 0.0796,
"step": 4680
},
{
"epoch": 3.92,
"grad_norm": 4.7333173751831055,
"learning_rate": 2.38e-06,
"loss": 0.073,
"step": 4700
},
{
"epoch": 3.93,
"grad_norm": 4.569043159484863,
"learning_rate": 2.3436363636363638e-06,
"loss": 0.0677,
"step": 4720
},
{
"epoch": 3.95,
"grad_norm": 4.0430755615234375,
"learning_rate": 2.3072727272727274e-06,
"loss": 0.0627,
"step": 4740
},
{
"epoch": 3.97,
"grad_norm": 6.368178367614746,
"learning_rate": 2.270909090909091e-06,
"loss": 0.0789,
"step": 4760
},
{
"epoch": 3.98,
"grad_norm": 3.39155912399292,
"learning_rate": 2.234545454545455e-06,
"loss": 0.0702,
"step": 4780
},
{
"epoch": 4.0,
"grad_norm": 6.195110321044922,
"learning_rate": 2.1981818181818185e-06,
"loss": 0.062,
"step": 4800
},
{
"epoch": 4.02,
"grad_norm": 1.523270845413208,
"learning_rate": 2.1618181818181817e-06,
"loss": 0.0333,
"step": 4820
},
{
"epoch": 4.03,
"grad_norm": 2.800551414489746,
"learning_rate": 2.1254545454545454e-06,
"loss": 0.0425,
"step": 4840
},
{
"epoch": 4.05,
"grad_norm": 2.0081329345703125,
"learning_rate": 2.089090909090909e-06,
"loss": 0.0352,
"step": 4860
},
{
"epoch": 4.07,
"grad_norm": 2.3982510566711426,
"learning_rate": 2.0527272727272727e-06,
"loss": 0.0307,
"step": 4880
},
{
"epoch": 4.08,
"grad_norm": 3.013807535171509,
"learning_rate": 2.0163636363636364e-06,
"loss": 0.0354,
"step": 4900
},
{
"epoch": 4.1,
"grad_norm": 4.255500793457031,
"learning_rate": 1.98e-06,
"loss": 0.0372,
"step": 4920
},
{
"epoch": 4.12,
"grad_norm": 4.95035982131958,
"learning_rate": 1.9436363636363638e-06,
"loss": 0.0342,
"step": 4940
},
{
"epoch": 4.13,
"grad_norm": 3.9183099269866943,
"learning_rate": 1.9072727272727272e-06,
"loss": 0.0306,
"step": 4960
},
{
"epoch": 4.15,
"grad_norm": 2.7411577701568604,
"learning_rate": 1.870909090909091e-06,
"loss": 0.0402,
"step": 4980
},
{
"epoch": 4.17,
"grad_norm": 6.921283721923828,
"learning_rate": 1.8345454545454546e-06,
"loss": 0.0459,
"step": 5000
},
{
"epoch": 4.17,
"eval_loss": 0.20849908888339996,
"eval_runtime": 329.3813,
"eval_samples_per_second": 0.304,
"eval_steps_per_second": 0.304,
"eval_wer": 47.37394957983193,
"step": 5000
},
{
"epoch": 5.02,
"grad_norm": 8.357197761535645,
"learning_rate": 3.0600000000000003e-06,
"loss": 0.1655,
"step": 5020
},
{
"epoch": 5.04,
"grad_norm": 10.608843803405762,
"learning_rate": 3.030769230769231e-06,
"loss": 0.1504,
"step": 5040
},
{
"epoch": 5.06,
"grad_norm": 12.850112915039062,
"learning_rate": 3e-06,
"loss": 0.1624,
"step": 5060
},
{
"epoch": 5.08,
"grad_norm": 7.485651969909668,
"learning_rate": 2.969230769230769e-06,
"loss": 0.16,
"step": 5080
},
{
"epoch": 5.1,
"grad_norm": 7.200412273406982,
"learning_rate": 2.938461538461539e-06,
"loss": 0.1432,
"step": 5100
},
{
"epoch": 5.12,
"grad_norm": 3.8365743160247803,
"learning_rate": 2.907692307692308e-06,
"loss": 0.1384,
"step": 5120
},
{
"epoch": 5.14,
"grad_norm": 7.808358669281006,
"learning_rate": 2.8769230769230772e-06,
"loss": 0.1554,
"step": 5140
},
{
"epoch": 5.16,
"grad_norm": 10.08485221862793,
"learning_rate": 2.846153846153846e-06,
"loss": 0.134,
"step": 5160
},
{
"epoch": 5.18,
"grad_norm": 6.713149547576904,
"learning_rate": 2.815384615384615e-06,
"loss": 0.1719,
"step": 5180
},
{
"epoch": 5.2,
"grad_norm": 7.925513744354248,
"learning_rate": 2.784615384615385e-06,
"loss": 0.1531,
"step": 5200
},
{
"epoch": 5.22,
"grad_norm": 7.1876115798950195,
"learning_rate": 2.753846153846154e-06,
"loss": 0.142,
"step": 5220
},
{
"epoch": 5.24,
"grad_norm": 8.050920486450195,
"learning_rate": 2.7230769230769234e-06,
"loss": 0.1563,
"step": 5240
},
{
"epoch": 5.26,
"grad_norm": 7.64823579788208,
"learning_rate": 2.6923076923076923e-06,
"loss": 0.1382,
"step": 5260
},
{
"epoch": 5.28,
"grad_norm": 12.070975303649902,
"learning_rate": 2.6615384615384613e-06,
"loss": 0.1455,
"step": 5280
},
{
"epoch": 5.3,
"grad_norm": 9.54661750793457,
"learning_rate": 2.630769230769231e-06,
"loss": 0.1487,
"step": 5300
},
{
"epoch": 5.32,
"grad_norm": 10.481926918029785,
"learning_rate": 2.6e-06,
"loss": 0.1259,
"step": 5320
},
{
"epoch": 5.34,
"grad_norm": 4.108445167541504,
"learning_rate": 2.5692307692307695e-06,
"loss": 0.1353,
"step": 5340
},
{
"epoch": 5.36,
"grad_norm": 8.814416885375977,
"learning_rate": 2.5384615384615385e-06,
"loss": 0.1551,
"step": 5360
},
{
"epoch": 5.38,
"grad_norm": 6.86100435256958,
"learning_rate": 2.507692307692308e-06,
"loss": 0.164,
"step": 5380
},
{
"epoch": 5.4,
"grad_norm": 6.572709560394287,
"learning_rate": 2.4769230769230773e-06,
"loss": 0.1231,
"step": 5400
},
{
"epoch": 5.42,
"grad_norm": 6.522790431976318,
"learning_rate": 2.4461538461538466e-06,
"loss": 0.1592,
"step": 5420
},
{
"epoch": 5.44,
"grad_norm": 5.718307971954346,
"learning_rate": 2.4153846153846156e-06,
"loss": 0.1233,
"step": 5440
},
{
"epoch": 5.46,
"grad_norm": 5.314148426055908,
"learning_rate": 2.384615384615385e-06,
"loss": 0.1776,
"step": 5460
},
{
"epoch": 5.48,
"grad_norm": 6.437089443206787,
"learning_rate": 2.353846153846154e-06,
"loss": 0.1327,
"step": 5480
},
{
"epoch": 5.5,
"grad_norm": 7.538025856018066,
"learning_rate": 2.3230769230769234e-06,
"loss": 0.1301,
"step": 5500
},
{
"epoch": 5.52,
"grad_norm": 7.919811248779297,
"learning_rate": 2.2923076923076928e-06,
"loss": 0.1419,
"step": 5520
},
{
"epoch": 5.54,
"grad_norm": 9.317774772644043,
"learning_rate": 2.2615384615384617e-06,
"loss": 0.1474,
"step": 5540
},
{
"epoch": 5.56,
"grad_norm": 6.526165008544922,
"learning_rate": 2.230769230769231e-06,
"loss": 0.1734,
"step": 5560
},
{
"epoch": 5.58,
"grad_norm": 3.361795663833618,
"learning_rate": 2.2e-06,
"loss": 0.1139,
"step": 5580
},
{
"epoch": 5.6,
"grad_norm": 7.677871227264404,
"learning_rate": 2.1692307692307695e-06,
"loss": 0.1441,
"step": 5600
},
{
"epoch": 5.62,
"grad_norm": 9.330676078796387,
"learning_rate": 2.138461538461539e-06,
"loss": 0.1494,
"step": 5620
},
{
"epoch": 5.64,
"grad_norm": 7.009490966796875,
"learning_rate": 2.107692307692308e-06,
"loss": 0.1208,
"step": 5640
},
{
"epoch": 5.66,
"grad_norm": 6.069945335388184,
"learning_rate": 2.0769230769230773e-06,
"loss": 0.1392,
"step": 5660
},
{
"epoch": 5.68,
"grad_norm": 8.43829345703125,
"learning_rate": 2.0461538461538462e-06,
"loss": 0.1523,
"step": 5680
},
{
"epoch": 5.7,
"grad_norm": 6.86118745803833,
"learning_rate": 2.0153846153846156e-06,
"loss": 0.1598,
"step": 5700
},
{
"epoch": 5.72,
"grad_norm": 4.472192287445068,
"learning_rate": 1.984615384615385e-06,
"loss": 0.1459,
"step": 5720
},
{
"epoch": 5.74,
"grad_norm": 4.14478874206543,
"learning_rate": 1.953846153846154e-06,
"loss": 0.1509,
"step": 5740
},
{
"epoch": 5.76,
"grad_norm": 8.51664924621582,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.1341,
"step": 5760
},
{
"epoch": 5.78,
"grad_norm": 6.804210662841797,
"learning_rate": 1.8923076923076924e-06,
"loss": 0.1462,
"step": 5780
},
{
"epoch": 5.8,
"grad_norm": 5.099180698394775,
"learning_rate": 1.8615384615384616e-06,
"loss": 0.1217,
"step": 5800
},
{
"epoch": 5.82,
"grad_norm": 8.629157066345215,
"learning_rate": 1.830769230769231e-06,
"loss": 0.1583,
"step": 5820
},
{
"epoch": 5.84,
"grad_norm": 4.583296775817871,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.1271,
"step": 5840
},
{
"epoch": 5.86,
"grad_norm": 3.160738229751587,
"learning_rate": 1.7692307692307695e-06,
"loss": 0.141,
"step": 5860
},
{
"epoch": 5.88,
"grad_norm": 9.473282814025879,
"learning_rate": 1.74e-06,
"loss": 0.1605,
"step": 5880
},
{
"epoch": 5.9,
"grad_norm": 4.42061185836792,
"learning_rate": 1.7092307692307695e-06,
"loss": 0.1038,
"step": 5900
},
{
"epoch": 5.92,
"grad_norm": 2.770753860473633,
"learning_rate": 1.6784615384615384e-06,
"loss": 0.1362,
"step": 5920
},
{
"epoch": 5.94,
"grad_norm": 5.55116605758667,
"learning_rate": 1.6476923076923078e-06,
"loss": 0.1313,
"step": 5940
},
{
"epoch": 5.96,
"grad_norm": 3.3803486824035645,
"learning_rate": 1.616923076923077e-06,
"loss": 0.1424,
"step": 5960
},
{
"epoch": 5.98,
"grad_norm": 7.3435282707214355,
"learning_rate": 1.5861538461538462e-06,
"loss": 0.1209,
"step": 5980
},
{
"epoch": 6.0,
"grad_norm": 7.475185394287109,
"learning_rate": 1.5553846153846156e-06,
"loss": 0.1312,
"step": 6000
},
{
"epoch": 6.0,
"eval_loss": 0.14498481154441833,
"eval_runtime": 339.1376,
"eval_samples_per_second": 0.295,
"eval_steps_per_second": 0.295,
"eval_wer": 44.81641468682505,
"step": 6000
}
],
"logging_steps": 20,
"max_steps": 7000,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 1000,
"total_flos": 5.19396003495936e+18,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}
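
For reference, a minimal sketch (not part of the original file) of how a trainer state like the one above is typically read back. It assumes the JSON is saved locally as checkpoint-6000/trainer_state.json (the path is an assumption); it loads the file with the standard json module, prints the summary fields (best_metric, best_model_checkpoint, global_step, max_steps), and filters log_history down to the evaluation entries, i.e. the ones carrying eval_wer, which appear every eval_steps=1000 steps, while plain training entries appear every logging_steps=20 steps. Run against this checkpoint it would report the best WER of about 44.82 at step 6000.

    import json

    # Minimal sketch: load the trainer state that transformers.Trainer
    # saved alongside this checkpoint. The local path is an assumption.
    with open("checkpoint-6000/trainer_state.json", encoding="utf-8") as f:
        state = json.load(f)

    # Top-level summary fields.
    print("best metric (eval_wer):", state["best_metric"])
    print("best checkpoint:", state["best_model_checkpoint"])
    print("progress:", state["global_step"], "/", state["max_steps"], "steps")

    # log_history interleaves training logs (keyed by "loss") and
    # evaluation logs (keyed by "eval_wer"); keep only the eval entries.
    eval_logs = [e for e in state["log_history"] if "eval_wer" in e]
    for e in eval_logs:
        print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.3f}, eval_wer={e['eval_wer']:.2f}")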