|
{ |
|
"best_metric": 1.1429458856582642, |
|
"best_model_checkpoint": "temp/checkpoint-23670", |
|
"epoch": 10.0, |
|
"eval_steps": 500, |
|
"global_step": 23670, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00044, |
|
"loss": 4.2184, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00094, |
|
"loss": 2.1594, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009990685859441152, |
|
"loss": 1.7535, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009980101608806097, |
|
"loss": 1.5491, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009969517358171041, |
|
"loss": 1.5243, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009958933107535987, |
|
"loss": 1.4251, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000994834885690093, |
|
"loss": 1.4419, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009937764606265877, |
|
"loss": 1.3417, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000992718035563082, |
|
"loss": 1.4644, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009916596104995767, |
|
"loss": 1.2834, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000990601185436071, |
|
"loss": 1.3385, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0009895427603725656, |
|
"loss": 1.6052, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0009884843353090602, |
|
"loss": 1.4924, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0009874259102455546, |
|
"loss": 1.306, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0009863674851820492, |
|
"loss": 1.2142, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0009853090601185436, |
|
"loss": 1.4172, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0009842929720575784, |
|
"loss": 1.3925, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0009832345469940728, |
|
"loss": 1.2059, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0009821761219305674, |
|
"loss": 1.2021, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0009811176968670618, |
|
"loss": 1.3228, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0009800592718035564, |
|
"loss": 1.2756, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000979000846740051, |
|
"loss": 1.2971, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0009779424216765453, |
|
"loss": 1.2016, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00097688399661304, |
|
"loss": 1.1839, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0009758255715495343, |
|
"loss": 1.2254, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000974809483488569, |
|
"loss": 1.2166, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0009737510584250635, |
|
"loss": 1.2849, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0009727349703640982, |
|
"loss": 1.3064, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0009716765453005926, |
|
"loss": 1.2186, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0009706181202370872, |
|
"loss": 1.2075, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0009695596951735818, |
|
"loss": 1.2558, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0009685012701100763, |
|
"loss": 1.1401, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0009674428450465708, |
|
"loss": 1.3006, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0009663844199830653, |
|
"loss": 1.4694, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0009653259949195598, |
|
"loss": 1.2387, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0009642675698560543, |
|
"loss": 1.218, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009632091447925487, |
|
"loss": 1.2104, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009621507197290432, |
|
"loss": 1.1609, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0009610922946655377, |
|
"loss": 1.3551, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009600338696020322, |
|
"loss": 1.1045, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009589754445385267, |
|
"loss": 1.2547, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009579170194750212, |
|
"loss": 1.1996, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009568585944115157, |
|
"loss": 1.2455, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009558001693480101, |
|
"loss": 1.2218, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009547417442845046, |
|
"loss": 1.2038, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0009536833192209991, |
|
"loss": 1.3026, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0009526248941574936, |
|
"loss": 1.1764, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0009515664690939881, |
|
"loss": 1.2109, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0009505080440304827, |
|
"loss": 1.1735, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0009494496189669772, |
|
"loss": 1.2407, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0009483911939034717, |
|
"loss": 1.1924, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0009473327688399661, |
|
"loss": 1.1716, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0009462743437764606, |
|
"loss": 1.2708, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0009452159187129551, |
|
"loss": 1.228, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0009441574936494496, |
|
"loss": 1.3187, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0009430990685859442, |
|
"loss": 1.1862, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0009420406435224387, |
|
"loss": 1.1531, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0009409822184589332, |
|
"loss": 1.204, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0009399237933954277, |
|
"loss": 1.1805, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0009388653683319221, |
|
"loss": 1.2059, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0009378069432684166, |
|
"loss": 1.1864, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0009367485182049111, |
|
"loss": 1.305, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0009356900931414056, |
|
"loss": 1.1419, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0009346316680779001, |
|
"loss": 1.1619, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0009335732430143946, |
|
"loss": 1.1292, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000932514817950889, |
|
"loss": 1.3506, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0009314563928873836, |
|
"loss": 1.2318, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0009303979678238781, |
|
"loss": 1.1838, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0009293395427603726, |
|
"loss": 1.2502, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0009282811176968671, |
|
"loss": 1.2143, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0009272226926333616, |
|
"loss": 1.1603, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0009261642675698561, |
|
"loss": 1.1874, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0009251058425063506, |
|
"loss": 1.1922, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.000924047417442845, |
|
"loss": 1.1762, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0009229889923793395, |
|
"loss": 1.2008, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.000921930567315834, |
|
"loss": 1.366, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0009208721422523285, |
|
"loss": 1.2688, |
|
"step": 1925 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.000919813717188823, |
|
"loss": 1.1463, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0009187552921253175, |
|
"loss": 1.2489, |
|
"step": 1975 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.000917696867061812, |
|
"loss": 1.2217, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0009166384419983066, |
|
"loss": 1.1869, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.000915580016934801, |
|
"loss": 1.1489, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0009145215918712955, |
|
"loss": 1.1774, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0009134631668077901, |
|
"loss": 1.05, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0009124047417442846, |
|
"loss": 1.1775, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0009113463166807791, |
|
"loss": 1.2268, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0009102878916172736, |
|
"loss": 1.2144, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0009092294665537681, |
|
"loss": 1.2081, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0009081710414902625, |
|
"loss": 1.1241, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.000907112616426757, |
|
"loss": 1.157, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.0009060541913632515, |
|
"loss": 1.1556, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.000904995766299746, |
|
"loss": 1.1942, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.0009039373412362405, |
|
"loss": 1.1466, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.000902878916172735, |
|
"loss": 1.1809, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 1.3190160989761353, |
|
"eval_runtime": 7617.6135, |
|
"eval_samples_per_second": 0.62, |
|
"eval_steps_per_second": 0.078, |
|
"eval_wer": 21.58414639083438, |
|
"step": 2367 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0009018204911092295, |
|
"loss": 1.1354, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.000900762066045724, |
|
"loss": 1.1331, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0008997036409822184, |
|
"loss": 1.0803, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.0008986452159187129, |
|
"loss": 1.0812, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0008975867908552074, |
|
"loss": 1.0893, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0008965283657917019, |
|
"loss": 1.0959, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.0008954699407281964, |
|
"loss": 1.0893, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.000894411515664691, |
|
"loss": 1.011, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0008933530906011855, |
|
"loss": 1.0836, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0008922946655376799, |
|
"loss": 1.1211, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0008912362404741744, |
|
"loss": 1.0742, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0008901778154106689, |
|
"loss": 1.1619, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0008891193903471635, |
|
"loss": 1.0852, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.000888060965283658, |
|
"loss": 2.875, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0008870025402201525, |
|
"loss": 5.1135, |
|
"step": 2725 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.000885944115156647, |
|
"loss": 1.3177, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.0008848856900931415, |
|
"loss": 1.1476, |
|
"step": 2775 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0008838272650296359, |
|
"loss": 1.0364, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.0008827688399661304, |
|
"loss": 1.1123, |
|
"step": 2825 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.0008817104149026249, |
|
"loss": 1.1715, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0008806519898391194, |
|
"loss": 1.1435, |
|
"step": 2875 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.0008795935647756139, |
|
"loss": 1.1766, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.0008785351397121084, |
|
"loss": 1.1142, |
|
"step": 2925 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.0008774767146486029, |
|
"loss": 1.1495, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.0008764182895850974, |
|
"loss": 1.0953, |
|
"step": 2975 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.0008753598645215919, |
|
"loss": 1.0642, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.0008743014394580864, |
|
"loss": 1.0634, |
|
"step": 3025 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0008732430143945809, |
|
"loss": 1.1236, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.0008721845893310754, |
|
"loss": 1.1002, |
|
"step": 3075 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0008711261642675699, |
|
"loss": 1.0747, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.0008700677392040644, |
|
"loss": 1.1069, |
|
"step": 3125 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.0008690093141405588, |
|
"loss": 1.11, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.0008679508890770533, |
|
"loss": 1.0691, |
|
"step": 3175 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.0008668924640135478, |
|
"loss": 1.0444, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.0008658340389500423, |
|
"loss": 1.1357, |
|
"step": 3225 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.0008647756138865368, |
|
"loss": 1.0616, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.0008637171888230313, |
|
"loss": 1.1194, |
|
"step": 3275 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0008626587637595258, |
|
"loss": 1.0896, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0008616003386960204, |
|
"loss": 1.1071, |
|
"step": 3325 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.0008605419136325148, |
|
"loss": 1.0941, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.0008594834885690093, |
|
"loss": 0.9956, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.0008584250635055038, |
|
"loss": 1.0825, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.0008573666384419984, |
|
"loss": 1.1306, |
|
"step": 3425 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.0008563082133784929, |
|
"loss": 1.1743, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.0008552497883149874, |
|
"loss": 1.1171, |
|
"step": 3475 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.0008541913632514819, |
|
"loss": 1.1154, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.0008531329381879764, |
|
"loss": 1.2423, |
|
"step": 3525 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0008520745131244708, |
|
"loss": 1.0663, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.0008510160880609653, |
|
"loss": 1.0785, |
|
"step": 3575 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.0008499576629974598, |
|
"loss": 1.1861, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.0008488992379339543, |
|
"loss": 1.1114, |
|
"step": 3625 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.0008478408128704488, |
|
"loss": 1.1205, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.0008467823878069433, |
|
"loss": 1.2242, |
|
"step": 3675 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.0008457239627434378, |
|
"loss": 1.1194, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.0008446655376799322, |
|
"loss": 1.0022, |
|
"step": 3725 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.0008436071126164267, |
|
"loss": 1.1052, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0008425486875529212, |
|
"loss": 1.0726, |
|
"step": 3775 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.0008414902624894157, |
|
"loss": 1.1301, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.0008404318374259102, |
|
"loss": 1.1998, |
|
"step": 3825 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.0008393734123624047, |
|
"loss": 1.0497, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.0008383149872988993, |
|
"loss": 1.0563, |
|
"step": 3875 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 0.0008372565622353937, |
|
"loss": 1.0837, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.0008361981371718882, |
|
"loss": 1.0801, |
|
"step": 3925 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.0008351397121083828, |
|
"loss": 1.1004, |
|
"step": 3950 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.0008340812870448773, |
|
"loss": 1.1603, |
|
"step": 3975 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.0008330228619813718, |
|
"loss": 1.1206, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.0008319644369178663, |
|
"loss": 1.041, |
|
"step": 4025 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.0008309060118543608, |
|
"loss": 1.0606, |
|
"step": 4050 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.0008298475867908553, |
|
"loss": 1.1693, |
|
"step": 4075 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.0008287891617273497, |
|
"loss": 1.1468, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.0008277307366638442, |
|
"loss": 1.1067, |
|
"step": 4125 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.0008266723116003387, |
|
"loss": 1.1099, |
|
"step": 4150 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0008256138865368332, |
|
"loss": 1.1336, |
|
"step": 4175 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.0008245554614733277, |
|
"loss": 1.0815, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.0008234970364098222, |
|
"loss": 1.1248, |
|
"step": 4225 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.0008224386113463167, |
|
"loss": 1.0526, |
|
"step": 4250 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.0008213801862828111, |
|
"loss": 1.117, |
|
"step": 4275 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.0008203217612193057, |
|
"loss": 1.1244, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.0008192633361558002, |
|
"loss": 1.0995, |
|
"step": 4325 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.0008182049110922947, |
|
"loss": 1.0677, |
|
"step": 4350 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.0008171464860287892, |
|
"loss": 1.1009, |
|
"step": 4375 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.0008160880609652837, |
|
"loss": 1.0575, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.0008150296359017782, |
|
"loss": 1.1141, |
|
"step": 4425 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.0008139712108382727, |
|
"loss": 1.1315, |
|
"step": 4450 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0008129127857747671, |
|
"loss": 1.1144, |
|
"step": 4475 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 0.0008118543607112616, |
|
"loss": 1.0226, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.0008107959356477561, |
|
"loss": 1.1154, |
|
"step": 4525 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.0008097375105842506, |
|
"loss": 1.2119, |
|
"step": 4550 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.0008086790855207451, |
|
"loss": 1.1489, |
|
"step": 4575 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.0008076206604572397, |
|
"loss": 1.1911, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.0008065622353937342, |
|
"loss": 1.0795, |
|
"step": 4625 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.0008055038103302286, |
|
"loss": 1.2184, |
|
"step": 4650 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.0008044453852667231, |
|
"loss": 1.2255, |
|
"step": 4675 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.0008033869602032176, |
|
"loss": 1.2472, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.0008023285351397121, |
|
"loss": 1.1805, |
|
"step": 4725 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": 1.2831470966339111, |
|
"eval_runtime": 9835.5257, |
|
"eval_samples_per_second": 0.48, |
|
"eval_steps_per_second": 0.06, |
|
"eval_wer": 72.5011133152504, |
|
"step": 4734 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.0008012701100762067, |
|
"loss": 1.0692, |
|
"step": 4750 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.0008002116850127012, |
|
"loss": 1.0087, |
|
"step": 4775 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 0.0007991532599491957, |
|
"loss": 1.0464, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.0007980948348856902, |
|
"loss": 1.0134, |
|
"step": 4825 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.0007970364098221846, |
|
"loss": 1.0308, |
|
"step": 4850 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 0.0007959779847586791, |
|
"loss": 1.0106, |
|
"step": 4875 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.0007949195596951736, |
|
"loss": 1.0188, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 0.0007938611346316681, |
|
"loss": 1.0394, |
|
"step": 4925 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.0007928027095681626, |
|
"loss": 1.0306, |
|
"step": 4950 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.0007917442845046571, |
|
"loss": 0.9953, |
|
"step": 4975 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 0.0007906858594411516, |
|
"loss": 0.9624, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.000789627434377646, |
|
"loss": 1.0608, |
|
"step": 5025 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 0.0007885690093141405, |
|
"loss": 0.9998, |
|
"step": 5050 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.000787510584250635, |
|
"loss": 1.0115, |
|
"step": 5075 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.0007864521591871295, |
|
"loss": 1.1508, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.000785393734123624, |
|
"loss": 1.0396, |
|
"step": 5125 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 0.0007843353090601185, |
|
"loss": 1.0139, |
|
"step": 5150 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.000783276883996613, |
|
"loss": 1.0031, |
|
"step": 5175 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 0.0007822184589331076, |
|
"loss": 1.0304, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 0.000781160033869602, |
|
"loss": 0.9774, |
|
"step": 5225 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.0007801016088060966, |
|
"loss": 1.0338, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 0.0007790431837425911, |
|
"loss": 1.0422, |
|
"step": 5275 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.0007779847586790856, |
|
"loss": 0.9857, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 0.0007769263336155801, |
|
"loss": 1.0122, |
|
"step": 5325 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 0.0007758679085520746, |
|
"loss": 1.1033, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.0007748094834885691, |
|
"loss": 1.0402, |
|
"step": 5375 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 0.0007737510584250635, |
|
"loss": 1.0065, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.000772692633361558, |
|
"loss": 1.0225, |
|
"step": 5425 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 0.0007716342082980525, |
|
"loss": 1.0859, |
|
"step": 5450 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 0.000770575783234547, |
|
"loss": 1.1253, |
|
"step": 5475 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.0007695173581710415, |
|
"loss": 0.9967, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 0.000768458933107536, |
|
"loss": 0.9974, |
|
"step": 5525 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.0007674005080440305, |
|
"loss": 0.9759, |
|
"step": 5550 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 0.000766342082980525, |
|
"loss": 1.0695, |
|
"step": 5575 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.0007652836579170194, |
|
"loss": 1.0083, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 0.000764225232853514, |
|
"loss": 1.0405, |
|
"step": 5625 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 0.0007631668077900085, |
|
"loss": 1.0207, |
|
"step": 5650 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.000762108382726503, |
|
"loss": 0.9772, |
|
"step": 5675 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 0.0007610499576629975, |
|
"loss": 1.0163, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.000759991532599492, |
|
"loss": 1.006, |
|
"step": 5725 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 0.0007589331075359865, |
|
"loss": 1.0609, |
|
"step": 5750 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 0.0007578746824724809, |
|
"loss": 1.0546, |
|
"step": 5775 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 0.0007568162574089754, |
|
"loss": 0.9899, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 0.0007557578323454699, |
|
"loss": 0.9937, |
|
"step": 5825 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 0.0007546994072819644, |
|
"loss": 1.0969, |
|
"step": 5850 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 0.000753640982218459, |
|
"loss": 1.0273, |
|
"step": 5875 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 0.0007525825571549535, |
|
"loss": 1.0957, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 0.000751524132091448, |
|
"loss": 1.0264, |
|
"step": 5925 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 0.0007504657070279425, |
|
"loss": 1.0108, |
|
"step": 5950 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 0.0007494072819644369, |
|
"loss": 1.0734, |
|
"step": 5975 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 0.0007483488569009314, |
|
"loss": 1.0501, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 0.0007472904318374259, |
|
"loss": 1.0607, |
|
"step": 6025 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 0.0007462320067739204, |
|
"loss": 1.0588, |
|
"step": 6050 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 0.000745173581710415, |
|
"loss": 1.066, |
|
"step": 6075 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 0.0007441151566469095, |
|
"loss": 1.072, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 0.000743056731583404, |
|
"loss": 1.0028, |
|
"step": 6125 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 0.0007419983065198984, |
|
"loss": 1.0594, |
|
"step": 6150 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 0.0007409398814563929, |
|
"loss": 1.0995, |
|
"step": 6175 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 0.0007398814563928874, |
|
"loss": 1.0289, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 0.0007388230313293819, |
|
"loss": 1.0261, |
|
"step": 6225 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 0.0007377646062658764, |
|
"loss": 1.0534, |
|
"step": 6250 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 0.0007367061812023709, |
|
"loss": 1.0424, |
|
"step": 6275 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 0.0007356477561388654, |
|
"loss": 1.0151, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 0.0007345893310753598, |
|
"loss": 1.0553, |
|
"step": 6325 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 0.0007335309060118543, |
|
"loss": 1.0892, |
|
"step": 6350 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 0.0007324724809483488, |
|
"loss": 0.9578, |
|
"step": 6375 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 0.0007314140558848433, |
|
"loss": 0.9877, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 0.0007303556308213378, |
|
"loss": 0.9604, |
|
"step": 6425 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 0.0007292972057578323, |
|
"loss": 1.0158, |
|
"step": 6450 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 0.0007282387806943268, |
|
"loss": 1.0495, |
|
"step": 6475 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 0.0007271803556308212, |
|
"loss": 1.0623, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 0.000726121930567316, |
|
"loss": 1.0074, |
|
"step": 6525 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 0.0007250635055038104, |
|
"loss": 1.0418, |
|
"step": 6550 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 0.0007240050804403049, |
|
"loss": 1.0479, |
|
"step": 6575 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 0.0007229466553767994, |
|
"loss": 1.1352, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 0.0007218882303132939, |
|
"loss": 0.9721, |
|
"step": 6625 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 0.0007208298052497884, |
|
"loss": 1.046, |
|
"step": 6650 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 0.0007197713801862829, |
|
"loss": 1.0826, |
|
"step": 6675 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 0.0007187129551227773, |
|
"loss": 0.9712, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 0.0007176545300592718, |
|
"loss": 1.0559, |
|
"step": 6725 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 0.0007165961049957663, |
|
"loss": 1.0685, |
|
"step": 6750 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 0.0007155376799322608, |
|
"loss": 1.0689, |
|
"step": 6775 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 0.0007144792548687553, |
|
"loss": 1.0437, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 0.0007134208298052498, |
|
"loss": 1.0884, |
|
"step": 6825 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 0.0007123624047417443, |
|
"loss": 1.0217, |
|
"step": 6850 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 0.0007113039796782388, |
|
"loss": 1.1075, |
|
"step": 6875 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 0.0007102455546147332, |
|
"loss": 1.0848, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 0.0007091871295512277, |
|
"loss": 1.0616, |
|
"step": 6925 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 0.0007081287044877223, |
|
"loss": 1.0098, |
|
"step": 6950 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 0.0007070702794242168, |
|
"loss": 1.0668, |
|
"step": 6975 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 0.0007060118543607113, |
|
"loss": 1.0463, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 0.0007049534292972058, |
|
"loss": 0.9984, |
|
"step": 7025 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 0.0007038950042337003, |
|
"loss": 1.069, |
|
"step": 7050 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 0.0007028365791701947, |
|
"loss": 1.0914, |
|
"step": 7075 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 0.0007017781541066892, |
|
"loss": 1.013, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_loss": 1.2100714445114136, |
|
"eval_runtime": 4312.9003, |
|
"eval_samples_per_second": 1.095, |
|
"eval_steps_per_second": 0.137, |
|
"eval_wer": 46.87866887980244, |
|
"step": 7101 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 0.0007007197290431837, |
|
"loss": 1.0658, |
|
"step": 7125 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 0.0006997036409822185, |
|
"loss": 0.9727, |
|
"step": 7150 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 0.000698645215918713, |
|
"loss": 0.9168, |
|
"step": 7175 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 0.0006975867908552075, |
|
"loss": 0.9955, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 0.000696528365791702, |
|
"loss": 0.9705, |
|
"step": 7225 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 0.0006954699407281965, |
|
"loss": 0.9692, |
|
"step": 7250 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 0.000694411515664691, |
|
"loss": 0.9555, |
|
"step": 7275 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 0.0006933530906011855, |
|
"loss": 0.9421, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 0.00069229466553768, |
|
"loss": 0.9501, |
|
"step": 7325 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 0.0006912362404741744, |
|
"loss": 0.9575, |
|
"step": 7350 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 0.0006901778154106689, |
|
"loss": 0.9371, |
|
"step": 7375 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 0.0006891193903471634, |
|
"loss": 0.9485, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 0.0006880609652836579, |
|
"loss": 0.9627, |
|
"step": 7425 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 0.0006870025402201524, |
|
"loss": 0.9501, |
|
"step": 7450 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 0.0006859441151566469, |
|
"loss": 0.9377, |
|
"step": 7475 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 0.0006848856900931414, |
|
"loss": 0.9765, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 0.0006838272650296358, |
|
"loss": 0.9471, |
|
"step": 7525 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 0.0006827688399661303, |
|
"loss": 1.0329, |
|
"step": 7550 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 0.0006817104149026248, |
|
"loss": 1.0231, |
|
"step": 7575 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 0.0006806519898391193, |
|
"loss": 0.9933, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 0.0006795935647756139, |
|
"loss": 0.9411, |
|
"step": 7625 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 0.0006785351397121085, |
|
"loss": 0.9513, |
|
"step": 7650 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 0.000677476714648603, |
|
"loss": 0.9823, |
|
"step": 7675 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 0.0006764182895850975, |
|
"loss": 0.9649, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 0.000675359864521592, |
|
"loss": 0.9613, |
|
"step": 7725 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 0.0006743014394580864, |
|
"loss": 1.0052, |
|
"step": 7750 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.0006732430143945809, |
|
"loss": 1.035, |
|
"step": 7775 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.0006721845893310754, |
|
"loss": 0.9563, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 0.0006711261642675699, |
|
"loss": 0.9533, |
|
"step": 7825 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 0.0006700677392040644, |
|
"loss": 1.0112, |
|
"step": 7850 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.0006690093141405589, |
|
"loss": 0.9479, |
|
"step": 7875 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 0.0006679508890770534, |
|
"loss": 0.9666, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.0006668924640135478, |
|
"loss": 0.9874, |
|
"step": 7925 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 0.0006658340389500423, |
|
"loss": 1.0469, |
|
"step": 7950 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 0.0006647756138865368, |
|
"loss": 1.056, |
|
"step": 7975 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.0006637171888230313, |
|
"loss": 0.9862, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 0.0006626587637595258, |
|
"loss": 0.9715, |
|
"step": 8025 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.0006616003386960203, |
|
"loss": 0.9228, |
|
"step": 8050 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 0.0006605419136325149, |
|
"loss": 0.9851, |
|
"step": 8075 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.0006594834885690093, |
|
"loss": 0.9781, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.0006584250635055038, |
|
"loss": 0.9534, |
|
"step": 8125 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 0.0006573666384419983, |
|
"loss": 1.061, |
|
"step": 8150 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.0006563082133784928, |
|
"loss": 0.931, |
|
"step": 8175 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 0.0006552497883149873, |
|
"loss": 0.9586, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.0006541913632514818, |
|
"loss": 1.0125, |
|
"step": 8225 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.0006531329381879763, |
|
"loss": 0.9899, |
|
"step": 8250 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.0006520745131244707, |
|
"loss": 0.962, |
|
"step": 8275 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 0.0006510160880609653, |
|
"loss": 0.9799, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 0.0006499576629974598, |
|
"loss": 0.9534, |
|
"step": 8325 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 0.0006488992379339543, |
|
"loss": 0.9681, |
|
"step": 8350 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 0.0006478408128704488, |
|
"loss": 0.9591, |
|
"step": 8375 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.0006467823878069433, |
|
"loss": 1.021, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 0.0006457239627434378, |
|
"loss": 1.0503, |
|
"step": 8425 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 0.0006446655376799323, |
|
"loss": 0.957, |
|
"step": 8450 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 0.0006436071126164267, |
|
"loss": 1.0219, |
|
"step": 8475 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 0.0006425486875529213, |
|
"loss": 0.968, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 0.0006414902624894158, |
|
"loss": 0.9812, |
|
"step": 8525 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 0.0006404318374259103, |
|
"loss": 0.9924, |
|
"step": 8550 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 0.0006393734123624048, |
|
"loss": 0.9772, |
|
"step": 8575 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 0.0006383149872988993, |
|
"loss": 1.0635, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.0006372565622353938, |
|
"loss": 1.05, |
|
"step": 8625 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 0.0006361981371718883, |
|
"loss": 1.0148, |
|
"step": 8650 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 0.0006351397121083827, |
|
"loss": 0.9081, |
|
"step": 8675 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 0.0006340812870448772, |
|
"loss": 0.991, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 0.0006330228619813717, |
|
"loss": 0.9306, |
|
"step": 8725 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 0.0006319644369178662, |
|
"loss": 0.9934, |
|
"step": 8750 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 0.0006309060118543607, |
|
"loss": 0.8858, |
|
"step": 8775 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 0.0006298475867908552, |
|
"loss": 0.9914, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 0.0006287891617273497, |
|
"loss": 0.9792, |
|
"step": 8825 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 0.0006277307366638441, |
|
"loss": 0.9537, |
|
"step": 8850 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 0.0006266723116003386, |
|
"loss": 1.0256, |
|
"step": 8875 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 0.0006256138865368331, |
|
"loss": 1.019, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 0.0006245554614733276, |
|
"loss": 0.9613, |
|
"step": 8925 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.0006234970364098223, |
|
"loss": 0.9606, |
|
"step": 8950 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 0.0006224386113463168, |
|
"loss": 1.0394, |
|
"step": 8975 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 0.0006213801862828113, |
|
"loss": 1.0117, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 0.0006203217612193058, |
|
"loss": 0.9886, |
|
"step": 9025 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 0.0006192633361558002, |
|
"loss": 1.0083, |
|
"step": 9050 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.0006182049110922947, |
|
"loss": 0.9849, |
|
"step": 9075 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 0.0006171464860287892, |
|
"loss": 0.9729, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 0.0006160880609652837, |
|
"loss": 0.9799, |
|
"step": 9125 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 0.0006150296359017782, |
|
"loss": 1.0039, |
|
"step": 9150 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.0006139712108382727, |
|
"loss": 1.0674, |
|
"step": 9175 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 0.0006129127857747672, |
|
"loss": 0.9812, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 0.0006118543607112616, |
|
"loss": 0.9987, |
|
"step": 9225 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 0.0006107959356477561, |
|
"loss": 0.9258, |
|
"step": 9250 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 0.0006097375105842506, |
|
"loss": 0.9777, |
|
"step": 9275 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.0006086790855207451, |
|
"loss": 0.9997, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 0.0006076206604572396, |
|
"loss": 0.9908, |
|
"step": 9325 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 0.0006065622353937341, |
|
"loss": 0.998, |
|
"step": 9350 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 0.0006055038103302287, |
|
"loss": 0.954, |
|
"step": 9375 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 0.0006044453852667232, |
|
"loss": 1.0006, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 0.0006033869602032176, |
|
"loss": 0.986, |
|
"step": 9425 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 0.0006023285351397121, |
|
"loss": 0.9995, |
|
"step": 9450 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 1.2009096145629883, |
|
"eval_runtime": 4732.0809, |
|
"eval_samples_per_second": 0.998, |
|
"eval_steps_per_second": 0.125, |
|
"eval_wer": 29.031213311201974, |
|
"step": 9468 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 0.0006012701100762066, |
|
"loss": 0.9877, |
|
"step": 9475 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 0.0006002116850127011, |
|
"loss": 0.9236, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 0.0005991532599491956, |
|
"loss": 0.9548, |
|
"step": 9525 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 0.0005980948348856901, |
|
"loss": 0.9139, |
|
"step": 9550 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 0.0005970364098221847, |
|
"loss": 0.898, |
|
"step": 9575 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 0.0005959779847586791, |
|
"loss": 0.8503, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 0.0005949195596951736, |
|
"loss": 0.9089, |
|
"step": 9625 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 0.0005938611346316681, |
|
"loss": 0.9742, |
|
"step": 9650 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 0.0005928027095681626, |
|
"loss": 0.9052, |
|
"step": 9675 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 0.0005917442845046571, |
|
"loss": 0.9267, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 0.0005906858594411516, |
|
"loss": 0.8613, |
|
"step": 9725 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 0.0005896274343776461, |
|
"loss": 0.9189, |
|
"step": 9750 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 0.0005885690093141405, |
|
"loss": 0.9323, |
|
"step": 9775 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 0.000587510584250635, |
|
"loss": 0.9681, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 0.0005864521591871296, |
|
"loss": 0.9357, |
|
"step": 9825 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 0.0005853937341236241, |
|
"loss": 0.9255, |
|
"step": 9850 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 0.0005843353090601186, |
|
"loss": 0.8853, |
|
"step": 9875 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 0.0005833192209991532, |
|
"loss": 1.0524, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 0.0005822607959356477, |
|
"loss": 0.9598, |
|
"step": 9925 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 0.0005812023708721422, |
|
"loss": 0.9312, |
|
"step": 9950 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 0.0005801439458086367, |
|
"loss": 0.9171, |
|
"step": 9975 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 0.0005790855207451312, |
|
"loss": 0.9126, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 0.0005780270956816257, |
|
"loss": 0.8642, |
|
"step": 10025 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 0.0005769686706181202, |
|
"loss": 0.9462, |
|
"step": 10050 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 0.0005759102455546148, |
|
"loss": 0.8599, |
|
"step": 10075 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 0.0005748518204911093, |
|
"loss": 0.8809, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 0.0005737933954276038, |
|
"loss": 0.9371, |
|
"step": 10125 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 0.0005727349703640983, |
|
"loss": 0.8878, |
|
"step": 10150 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 0.0005716765453005928, |
|
"loss": 0.8485, |
|
"step": 10175 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 0.0005706181202370873, |
|
"loss": 0.8717, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 0.0005695596951735818, |
|
"loss": 0.9409, |
|
"step": 10225 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 0.0005685012701100762, |
|
"loss": 0.8741, |
|
"step": 10250 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 0.0005674428450465707, |
|
"loss": 0.9594, |
|
"step": 10275 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 0.0005663844199830652, |
|
"loss": 0.9609, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 0.0005653259949195597, |
|
"loss": 0.9146, |
|
"step": 10325 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 0.0005642675698560542, |
|
"loss": 0.9324, |
|
"step": 10350 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 0.0005632091447925487, |
|
"loss": 0.9157, |
|
"step": 10375 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 0.0005621507197290432, |
|
"loss": 0.9275, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 0.0005610922946655376, |
|
"loss": 0.9113, |
|
"step": 10425 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 0.0005600338696020321, |
|
"loss": 0.9281, |
|
"step": 10450 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 0.0005589754445385266, |
|
"loss": 0.9332, |
|
"step": 10475 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 0.0005579170194750212, |
|
"loss": 0.902, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 0.0005568585944115157, |
|
"loss": 0.9507, |
|
"step": 10525 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 0.0005558001693480102, |
|
"loss": 1.015, |
|
"step": 10550 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 0.0005547417442845047, |
|
"loss": 0.9087, |
|
"step": 10575 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 0.0005536833192209992, |
|
"loss": 0.9933, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 0.0005526248941574936, |
|
"loss": 0.9266, |
|
"step": 10625 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 0.0005515664690939881, |
|
"loss": 0.928, |
|
"step": 10650 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 0.0005505080440304826, |
|
"loss": 0.973, |
|
"step": 10675 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 0.0005494496189669772, |
|
"loss": 0.9384, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 0.0005483911939034717, |
|
"loss": 0.9409, |
|
"step": 10725 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 0.0005473327688399662, |
|
"loss": 0.908, |
|
"step": 10750 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 0.0005462743437764607, |
|
"loss": 0.9074, |
|
"step": 10775 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 0.0005452159187129551, |
|
"loss": 0.9468, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 0.0005441574936494496, |
|
"loss": 0.9009, |
|
"step": 10825 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 0.0005430990685859441, |
|
"loss": 0.9129, |
|
"step": 10850 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 0.0005420406435224386, |
|
"loss": 0.962, |
|
"step": 10875 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 0.0005409822184589331, |
|
"loss": 0.937, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 0.0005399237933954277, |
|
"loss": 0.8833, |
|
"step": 10925 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 0.0005388653683319222, |
|
"loss": 0.9346, |
|
"step": 10950 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 0.0005378069432684167, |
|
"loss": 0.9755, |
|
"step": 10975 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 0.0005367485182049111, |
|
"loss": 0.9885, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 0.0005356900931414056, |
|
"loss": 0.9258, |
|
"step": 11025 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 0.0005346316680779001, |
|
"loss": 0.9542, |
|
"step": 11050 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 0.0005335732430143946, |
|
"loss": 0.9243, |
|
"step": 11075 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 0.0005325148179508891, |
|
"loss": 0.9488, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 0.0005314563928873836, |
|
"loss": 0.919, |
|
"step": 11125 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 0.0005303979678238781, |
|
"loss": 0.9595, |
|
"step": 11150 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 0.0005293395427603725, |
|
"loss": 0.9857, |
|
"step": 11175 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 0.000528281117696867, |
|
"loss": 0.8741, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 0.0005272226926333615, |
|
"loss": 0.9894, |
|
"step": 11225 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 0.000526164267569856, |
|
"loss": 0.9435, |
|
"step": 11250 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 0.0005251058425063505, |
|
"loss": 0.958, |
|
"step": 11275 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 0.000524047417442845, |
|
"loss": 0.9052, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 0.0005229889923793395, |
|
"loss": 0.9745, |
|
"step": 11325 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 0.000521930567315834, |
|
"loss": 0.9233, |
|
"step": 11350 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 0.0005208721422523286, |
|
"loss": 0.9504, |
|
"step": 11375 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 0.0005198137171888231, |
|
"loss": 0.9513, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 0.0005187552921253176, |
|
"loss": 0.8862, |
|
"step": 11425 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 0.0005176968670618121, |
|
"loss": 0.9127, |
|
"step": 11450 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 0.0005166384419983066, |
|
"loss": 0.9122, |
|
"step": 11475 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 0.0005155800169348011, |
|
"loss": 0.8923, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 0.0005145215918712956, |
|
"loss": 0.9599, |
|
"step": 11525 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 0.00051346316680779, |
|
"loss": 0.908, |
|
"step": 11550 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 0.0005124047417442845, |
|
"loss": 0.9405, |
|
"step": 11575 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 0.000511346316680779, |
|
"loss": 0.95, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 0.0005102878916172735, |
|
"loss": 0.9096, |
|
"step": 11625 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 0.000509229466553768, |
|
"loss": 0.8797, |
|
"step": 11650 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 0.0005081710414902625, |
|
"loss": 0.9073, |
|
"step": 11675 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 0.000507112616426757, |
|
"loss": 0.9584, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 0.0005060541913632514, |
|
"loss": 0.9772, |
|
"step": 11725 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 0.0005049957662997459, |
|
"loss": 0.8993, |
|
"step": 11750 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 0.0005039373412362404, |
|
"loss": 0.9135, |
|
"step": 11775 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 0.0005028789161727349, |
|
"loss": 0.922, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 0.0005018204911092295, |
|
"loss": 0.9102, |
|
"step": 11825 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 1.170865774154663, |
|
"eval_runtime": 4761.8546, |
|
"eval_samples_per_second": 0.992, |
|
"eval_steps_per_second": 0.124, |
|
"eval_wer": 27.24788470102425, |
|
"step": 11835 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 0.000500762066045724, |
|
"loss": 0.9302, |
|
"step": 11850 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 0.0004997036409822185, |
|
"loss": 0.8748, |
|
"step": 11875 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 0.000498645215918713, |
|
"loss": 0.8842, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 0.0004975867908552074, |
|
"loss": 0.8762, |
|
"step": 11925 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 0.0004965283657917019, |
|
"loss": 0.8903, |
|
"step": 11950 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 0.0004954699407281964, |
|
"loss": 0.8895, |
|
"step": 11975 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 0.0004944115156646909, |
|
"loss": 0.9031, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 0.0004933530906011854, |
|
"loss": 0.8865, |
|
"step": 12025 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 0.00049229466553768, |
|
"loss": 0.853, |
|
"step": 12050 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 0.0004912362404741745, |
|
"loss": 0.8781, |
|
"step": 12075 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 0.000490177815410669, |
|
"loss": 0.8667, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 0.0004891193903471634, |
|
"loss": 0.8595, |
|
"step": 12125 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 0.000488060965283658, |
|
"loss": 0.8717, |
|
"step": 12150 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.00048700254022015246, |
|
"loss": 0.8627, |
|
"step": 12175 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 0.00048594411515664695, |
|
"loss": 0.8646, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 0.00048488569009314143, |
|
"loss": 0.901, |
|
"step": 12225 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 0.0004838272650296359, |
|
"loss": 0.8932, |
|
"step": 12250 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 0.0004827688399661304, |
|
"loss": 0.8447, |
|
"step": 12275 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 0.0004817104149026249, |
|
"loss": 0.8813, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 0.0004806519898391194, |
|
"loss": 0.8897, |
|
"step": 12325 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.00047959356477561386, |
|
"loss": 0.8765, |
|
"step": 12350 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 0.0004785351397121084, |
|
"loss": 0.917, |
|
"step": 12375 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 0.0004774767146486029, |
|
"loss": 0.8642, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 0.0004764182895850974, |
|
"loss": 0.8903, |
|
"step": 12425 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 0.00047535986452159186, |
|
"loss": 0.8825, |
|
"step": 12450 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 0.0004743014394580864, |
|
"loss": 0.864, |
|
"step": 12475 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 0.0004732430143945809, |
|
"loss": 0.9053, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 0.00047218458933107537, |
|
"loss": 0.8633, |
|
"step": 12525 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 0.00047112616426756986, |
|
"loss": 0.8638, |
|
"step": 12550 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 0.00047006773920406434, |
|
"loss": 0.818, |
|
"step": 12575 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 0.0004690093141405589, |
|
"loss": 0.8435, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 0.00046795088907705337, |
|
"loss": 0.8343, |
|
"step": 12625 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 0.00046689246401354786, |
|
"loss": 0.8535, |
|
"step": 12650 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 0.00046583403895004234, |
|
"loss": 0.8622, |
|
"step": 12675 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 0.00046477561388653683, |
|
"loss": 0.9058, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 0.0004637171888230313, |
|
"loss": 0.8637, |
|
"step": 12725 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.0004626587637595258, |
|
"loss": 0.8698, |
|
"step": 12750 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 0.0004616003386960203, |
|
"loss": 0.8424, |
|
"step": 12775 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 0.0004605419136325149, |
|
"loss": 0.8894, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 0.00045948348856900936, |
|
"loss": 0.8493, |
|
"step": 12825 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 0.00045842506350550385, |
|
"loss": 0.8802, |
|
"step": 12850 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 0.00045736663844199834, |
|
"loss": 0.9134, |
|
"step": 12875 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 0.0004563082133784928, |
|
"loss": 0.8727, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 0.0004552497883149873, |
|
"loss": 0.905, |
|
"step": 12925 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 0.0004541913632514818, |
|
"loss": 0.8687, |
|
"step": 12950 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 0.0004531329381879763, |
|
"loss": 0.9142, |
|
"step": 12975 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 0.00045207451312447077, |
|
"loss": 0.8446, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 0.0004510160880609653, |
|
"loss": 0.8934, |
|
"step": 13025 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 0.0004499576629974598, |
|
"loss": 0.8856, |
|
"step": 13050 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 0.0004488992379339543, |
|
"loss": 0.8806, |
|
"step": 13075 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 0.00044784081287044876, |
|
"loss": 0.9213, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 0.0004467823878069433, |
|
"loss": 0.8352, |
|
"step": 13125 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 0.0004457239627434378, |
|
"loss": 0.8827, |
|
"step": 13150 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 0.0004446655376799323, |
|
"loss": 0.872, |
|
"step": 13175 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 0.00044360711261642676, |
|
"loss": 0.895, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 0.00044254868755292125, |
|
"loss": 0.936, |
|
"step": 13225 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 0.0004414902624894158, |
|
"loss": 0.8979, |
|
"step": 13250 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 0.00044043183742591027, |
|
"loss": 0.8698, |
|
"step": 13275 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 0.00043937341236240476, |
|
"loss": 0.8921, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 0.00043831498729889924, |
|
"loss": 0.9028, |
|
"step": 13325 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 0.00043725656223539373, |
|
"loss": 0.8962, |
|
"step": 13350 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 0.0004361981371718882, |
|
"loss": 0.9641, |
|
"step": 13375 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 0.0004351397121083827, |
|
"loss": 0.8767, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 0.0004340812870448772, |
|
"loss": 0.9176, |
|
"step": 13425 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 0.0004330228619813717, |
|
"loss": 0.8864, |
|
"step": 13450 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 0.00043196443691786627, |
|
"loss": 0.8844, |
|
"step": 13475 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 0.00043090601185436075, |
|
"loss": 0.8831, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 0.00042984758679085524, |
|
"loss": 0.9046, |
|
"step": 13525 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 0.0004287891617273497, |
|
"loss": 0.8509, |
|
"step": 13550 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.0004277307366638442, |
|
"loss": 0.9291, |
|
"step": 13575 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 0.0004266723116003387, |
|
"loss": 0.8443, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 0.0004256138865368332, |
|
"loss": 0.8935, |
|
"step": 13625 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 0.00042455546147332767, |
|
"loss": 0.8912, |
|
"step": 13650 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 0.00042349703640982215, |
|
"loss": 0.9162, |
|
"step": 13675 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 0.0004224386113463167, |
|
"loss": 0.8835, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 0.0004213801862828112, |
|
"loss": 0.8494, |
|
"step": 13725 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 0.0004203217612193057, |
|
"loss": 0.8818, |
|
"step": 13750 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 0.0004192633361558002, |
|
"loss": 0.9461, |
|
"step": 13775 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 0.0004182049110922947, |
|
"loss": 0.8506, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 0.0004171464860287892, |
|
"loss": 0.8769, |
|
"step": 13825 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 0.00041608806096528366, |
|
"loss": 0.8792, |
|
"step": 13850 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 0.00041502963590177815, |
|
"loss": 0.9077, |
|
"step": 13875 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 0.0004139712108382727, |
|
"loss": 0.8636, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 0.0004129127857747672, |
|
"loss": 0.8776, |
|
"step": 13925 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 0.00041185436071126166, |
|
"loss": 0.8983, |
|
"step": 13950 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 0.00041079593564775614, |
|
"loss": 0.8978, |
|
"step": 13975 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 0.00040973751058425063, |
|
"loss": 0.875, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 0.0004086790855207451, |
|
"loss": 0.8698, |
|
"step": 14025 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 0.0004076206604572396, |
|
"loss": 0.8782, |
|
"step": 14050 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 0.00040656223539373414, |
|
"loss": 0.8637, |
|
"step": 14075 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 0.00040550381033022863, |
|
"loss": 0.9123, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 0.00040444538526672317, |
|
"loss": 0.8662, |
|
"step": 14125 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 0.00040338696020321765, |
|
"loss": 0.9166, |
|
"step": 14150 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 0.00040232853513971214, |
|
"loss": 0.8683, |
|
"step": 14175 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 0.0004012701100762066, |
|
"loss": 0.9044, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_loss": 1.1590253114700317, |
|
"eval_runtime": 4555.39, |
|
"eval_samples_per_second": 1.037, |
|
"eval_steps_per_second": 0.13, |
|
"eval_wer": 33.9115825270232, |
|
"step": 14202 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 0.0004002116850127011, |
|
"loss": 0.8429, |
|
"step": 14225 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 0.0003991532599491956, |
|
"loss": 0.8537, |
|
"step": 14250 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 0.0003980948348856901, |
|
"loss": 0.8102, |
|
"step": 14275 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 0.00039703640982218457, |
|
"loss": 0.8547, |
|
"step": 14300 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 0.00039597798475867905, |
|
"loss": 0.8256, |
|
"step": 14325 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 0.0003949195596951736, |
|
"loss": 0.8556, |
|
"step": 14350 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.0003938611346316681, |
|
"loss": 0.8445, |
|
"step": 14375 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 0.0003928027095681626, |
|
"loss": 0.839, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 0.0003917442845046571, |
|
"loss": 0.7951, |
|
"step": 14425 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 0.0003906858594411516, |
|
"loss": 0.8409, |
|
"step": 14450 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 0.0003896274343776461, |
|
"loss": 0.8427, |
|
"step": 14475 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 0.00038856900931414056, |
|
"loss": 0.8153, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 0.00038751058425063505, |
|
"loss": 0.8264, |
|
"step": 14525 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 0.00038645215918712953, |
|
"loss": 0.8903, |
|
"step": 14550 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 0.0003853937341236241, |
|
"loss": 0.8659, |
|
"step": 14575 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 0.00038433530906011856, |
|
"loss": 0.858, |
|
"step": 14600 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 0.00038327688399661305, |
|
"loss": 0.8571, |
|
"step": 14625 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 0.00038221845893310753, |
|
"loss": 0.847, |
|
"step": 14650 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 0.000381160033869602, |
|
"loss": 0.8815, |
|
"step": 14675 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"learning_rate": 0.0003801016088060965, |
|
"loss": 0.8295, |
|
"step": 14700 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 0.00037904318374259104, |
|
"loss": 0.8391, |
|
"step": 14725 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 0.00037798475867908553, |
|
"loss": 0.8106, |
|
"step": 14750 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 0.00037692633361558, |
|
"loss": 0.8494, |
|
"step": 14775 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 0.00037586790855207456, |
|
"loss": 0.7969, |
|
"step": 14800 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 0.00037480948348856904, |
|
"loss": 0.8104, |
|
"step": 14825 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 0.0003737510584250635, |
|
"loss": 0.8214, |
|
"step": 14850 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 0.000372692633361558, |
|
"loss": 0.8566, |
|
"step": 14875 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 0.0003716342082980525, |
|
"loss": 0.8315, |
|
"step": 14900 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"learning_rate": 0.000370575783234547, |
|
"loss": 0.8438, |
|
"step": 14925 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 0.00036951735817104147, |
|
"loss": 0.8244, |
|
"step": 14950 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 0.00036845893310753596, |
|
"loss": 0.8052, |
|
"step": 14975 |
|
}, |
|
{ |
|
"epoch": 6.34, |
|
"learning_rate": 0.00036740050804403044, |
|
"loss": 0.8215, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 0.000366342082980525, |
|
"loss": 0.8244, |
|
"step": 15025 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 0.0003652836579170195, |
|
"loss": 0.8554, |
|
"step": 15050 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 0.000364225232853514, |
|
"loss": 0.837, |
|
"step": 15075 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 0.0003631668077900085, |
|
"loss": 0.8483, |
|
"step": 15100 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 0.000362108382726503, |
|
"loss": 0.8399, |
|
"step": 15125 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 0.00036104995766299747, |
|
"loss": 0.8631, |
|
"step": 15150 |
|
}, |
|
{ |
|
"epoch": 6.41, |
|
"learning_rate": 0.00035999153259949195, |
|
"loss": 0.8093, |
|
"step": 15175 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 0.00035893310753598644, |
|
"loss": 0.8194, |
|
"step": 15200 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 0.000357874682472481, |
|
"loss": 0.8645, |
|
"step": 15225 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"learning_rate": 0.00035681625740897546, |
|
"loss": 0.8283, |
|
"step": 15250 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 0.00035575783234546995, |
|
"loss": 0.8526, |
|
"step": 15275 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 0.00035469940728196443, |
|
"loss": 0.8235, |
|
"step": 15300 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 0.0003536409822184589, |
|
"loss": 0.8818, |
|
"step": 15325 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"learning_rate": 0.0003525825571549534, |
|
"loss": 0.8597, |
|
"step": 15350 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 0.00035152413209144795, |
|
"loss": 0.8344, |
|
"step": 15375 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 0.00035046570702794243, |
|
"loss": 0.8545, |
|
"step": 15400 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"learning_rate": 0.0003494072819644369, |
|
"loss": 0.8534, |
|
"step": 15425 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 0.00034834885690093146, |
|
"loss": 0.8258, |
|
"step": 15450 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"learning_rate": 0.00034729043183742594, |
|
"loss": 0.8462, |
|
"step": 15475 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 0.00034623200677392043, |
|
"loss": 0.9024, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 0.0003451735817104149, |
|
"loss": 0.8388, |
|
"step": 15525 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"learning_rate": 0.0003441151566469094, |
|
"loss": 0.8628, |
|
"step": 15550 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 0.0003430567315834039, |
|
"loss": 0.8322, |
|
"step": 15575 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 0.00034199830651989837, |
|
"loss": 0.85, |
|
"step": 15600 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 0.00034093988145639286, |
|
"loss": 0.8186, |
|
"step": 15625 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 0.00033988145639288734, |
|
"loss": 0.8289, |
|
"step": 15650 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"learning_rate": 0.00033882303132938194, |
|
"loss": 0.8344, |
|
"step": 15675 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 0.0003377646062658764, |
|
"loss": 0.8698, |
|
"step": 15700 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 0.0003367061812023709, |
|
"loss": 0.8414, |
|
"step": 15725 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 0.0003356477561388654, |
|
"loss": 0.8704, |
|
"step": 15750 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 0.0003345893310753599, |
|
"loss": 0.8711, |
|
"step": 15775 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 0.00033353090601185437, |
|
"loss": 0.8546, |
|
"step": 15800 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"learning_rate": 0.00033247248094834885, |
|
"loss": 0.8396, |
|
"step": 15825 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 0.00033141405588484334, |
|
"loss": 0.8935, |
|
"step": 15850 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 0.0003303556308213378, |
|
"loss": 0.8649, |
|
"step": 15875 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 0.00032929720575783236, |
|
"loss": 0.8264, |
|
"step": 15900 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 0.00032823878069432685, |
|
"loss": 0.8468, |
|
"step": 15925 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 0.00032718035563082134, |
|
"loss": 0.8418, |
|
"step": 15950 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 0.0003261219305673158, |
|
"loss": 0.8974, |
|
"step": 15975 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 0.00032506350550381036, |
|
"loss": 0.8239, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"learning_rate": 0.00032400508044030485, |
|
"loss": 0.8184, |
|
"step": 16025 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 0.00032294665537679933, |
|
"loss": 0.8399, |
|
"step": 16050 |
|
}, |
|
{ |
|
"epoch": 6.79, |
|
"learning_rate": 0.0003218882303132938, |
|
"loss": 0.8312, |
|
"step": 16075 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 0.0003208298052497883, |
|
"loss": 0.8497, |
|
"step": 16100 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 0.00031977138018628284, |
|
"loss": 0.8752, |
|
"step": 16125 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 0.00031871295512277733, |
|
"loss": 0.8138, |
|
"step": 16150 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 0.0003176545300592718, |
|
"loss": 0.8271, |
|
"step": 16175 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"learning_rate": 0.0003165961049957663, |
|
"loss": 0.8722, |
|
"step": 16200 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 0.0003155376799322608, |
|
"loss": 0.8186, |
|
"step": 16225 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 0.0003144792548687553, |
|
"loss": 0.8512, |
|
"step": 16250 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 0.00031342082980524976, |
|
"loss": 0.8943, |
|
"step": 16275 |
|
}, |
|
{ |
|
"epoch": 6.89, |
|
"learning_rate": 0.00031236240474174425, |
|
"loss": 0.788, |
|
"step": 16300 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 0.00031130397967823884, |
|
"loss": 0.8147, |
|
"step": 16325 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 0.0003102455546147333, |
|
"loss": 0.8541, |
|
"step": 16350 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 0.0003091871295512278, |
|
"loss": 0.8571, |
|
"step": 16375 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 0.0003081287044877223, |
|
"loss": 0.8386, |
|
"step": 16400 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"learning_rate": 0.0003070702794242168, |
|
"loss": 0.8413, |
|
"step": 16425 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 0.00030601185436071127, |
|
"loss": 0.8362, |
|
"step": 16450 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"learning_rate": 0.00030495342929720575, |
|
"loss": 0.8559, |
|
"step": 16475 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 0.00030389500423370024, |
|
"loss": 0.85, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 0.0003028365791701947, |
|
"loss": 0.8117, |
|
"step": 16525 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"learning_rate": 0.00030177815410668927, |
|
"loss": 0.8633, |
|
"step": 16550 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_loss": 1.1495270729064941, |
|
"eval_runtime": 4489.4556, |
|
"eval_samples_per_second": 1.052, |
|
"eval_steps_per_second": 0.132, |
|
"eval_wer": 30.79429982591798, |
|
"step": 16569 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 0.00030071972904318375, |
|
"loss": 0.8121, |
|
"step": 16575 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"learning_rate": 0.00029966130397967824, |
|
"loss": 0.8392, |
|
"step": 16600 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 0.0002986028789161727, |
|
"loss": 0.8302, |
|
"step": 16625 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 0.00029754445385266726, |
|
"loss": 0.8181, |
|
"step": 16650 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 0.00029648602878916175, |
|
"loss": 0.7676, |
|
"step": 16675 |
|
}, |
|
{ |
|
"epoch": 7.06, |
|
"learning_rate": 0.00029542760372565623, |
|
"loss": 0.8366, |
|
"step": 16700 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 0.0002943691786621507, |
|
"loss": 0.8093, |
|
"step": 16725 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 0.0002933107535986452, |
|
"loss": 0.7931, |
|
"step": 16750 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 0.00029225232853513975, |
|
"loss": 0.7998, |
|
"step": 16775 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 0.00029119390347163423, |
|
"loss": 0.7657, |
|
"step": 16800 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 0.0002901354784081287, |
|
"loss": 0.8145, |
|
"step": 16825 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"learning_rate": 0.0002890770533446232, |
|
"loss": 0.8068, |
|
"step": 16850 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 0.0002880186282811177, |
|
"loss": 0.8559, |
|
"step": 16875 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 0.0002869602032176122, |
|
"loss": 0.8337, |
|
"step": 16900 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 0.00028590177815410666, |
|
"loss": 0.8231, |
|
"step": 16925 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 0.00028484335309060115, |
|
"loss": 0.8278, |
|
"step": 16950 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 0.0002837849280270957, |
|
"loss": 0.7838, |
|
"step": 16975 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 0.0002827265029635902, |
|
"loss": 0.8531, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 7.19, |
|
"learning_rate": 0.0002816680779000847, |
|
"loss": 0.781, |
|
"step": 17025 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 0.0002806096528365792, |
|
"loss": 0.8274, |
|
"step": 17050 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 0.0002795512277730737, |
|
"loss": 0.8052, |
|
"step": 17075 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 0.00027849280270956817, |
|
"loss": 0.8197, |
|
"step": 17100 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 0.00027743437764606266, |
|
"loss": 0.7912, |
|
"step": 17125 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 0.00027637595258255714, |
|
"loss": 0.7939, |
|
"step": 17150 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 0.00027531752751905163, |
|
"loss": 0.7834, |
|
"step": 17175 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 0.0002742591024555461, |
|
"loss": 0.7949, |
|
"step": 17200 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 0.00027320067739204065, |
|
"loss": 0.7883, |
|
"step": 17225 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 0.00027214225232853514, |
|
"loss": 0.8239, |
|
"step": 17250 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 0.0002710838272650296, |
|
"loss": 0.7861, |
|
"step": 17275 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 0.00027002540220152416, |
|
"loss": 0.8098, |
|
"step": 17300 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"learning_rate": 0.00026896697713801865, |
|
"loss": 0.8416, |
|
"step": 17325 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 0.00026795088907705333, |
|
"loss": 0.8691, |
|
"step": 17350 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 0.0002668924640135478, |
|
"loss": 0.7902, |
|
"step": 17375 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"learning_rate": 0.0002658340389500423, |
|
"loss": 0.8293, |
|
"step": 17400 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 0.0002647756138865368, |
|
"loss": 0.8133, |
|
"step": 17425 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"learning_rate": 0.0002637171888230314, |
|
"loss": 0.8323, |
|
"step": 17450 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 0.00026265876375952587, |
|
"loss": 0.8169, |
|
"step": 17475 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 0.00026160033869602036, |
|
"loss": 0.7773, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 0.00026054191363251484, |
|
"loss": 0.7888, |
|
"step": 17525 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 0.00025948348856900933, |
|
"loss": 0.828, |
|
"step": 17550 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 0.0002584250635055038, |
|
"loss": 0.8018, |
|
"step": 17575 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"learning_rate": 0.0002573666384419983, |
|
"loss": 0.7747, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 0.0002563082133784928, |
|
"loss": 0.7546, |
|
"step": 17625 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 0.00025524978831498727, |
|
"loss": 0.8317, |
|
"step": 17650 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 0.0002541913632514818, |
|
"loss": 0.8325, |
|
"step": 17675 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.0002531329381879763, |
|
"loss": 0.8144, |
|
"step": 17700 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"learning_rate": 0.0002520745131244708, |
|
"loss": 0.8127, |
|
"step": 17725 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 0.00025101608806096527, |
|
"loss": 0.823, |
|
"step": 17750 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 0.0002499576629974598, |
|
"loss": 0.8121, |
|
"step": 17775 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"learning_rate": 0.0002488992379339543, |
|
"loss": 0.8345, |
|
"step": 17800 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 0.0002478408128704488, |
|
"loss": 0.8262, |
|
"step": 17825 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"learning_rate": 0.00024678238780694327, |
|
"loss": 0.822, |
|
"step": 17850 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 0.0002457239627434378, |
|
"loss": 0.8367, |
|
"step": 17875 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 0.0002446655376799323, |
|
"loss": 0.8349, |
|
"step": 17900 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"learning_rate": 0.00024360711261642675, |
|
"loss": 0.8144, |
|
"step": 17925 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 0.00024254868755292126, |
|
"loss": 0.8128, |
|
"step": 17950 |
|
}, |
|
{ |
|
"epoch": 7.59, |
|
"learning_rate": 0.00024149026248941575, |
|
"loss": 0.8355, |
|
"step": 17975 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 0.00024043183742591023, |
|
"loss": 0.8113, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"learning_rate": 0.00023937341236240475, |
|
"loss": 0.7984, |
|
"step": 18025 |
|
}, |
|
{ |
|
"epoch": 7.63, |
|
"learning_rate": 0.00023831498729889926, |
|
"loss": 0.8436, |
|
"step": 18050 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 0.00023725656223539375, |
|
"loss": 0.7881, |
|
"step": 18075 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 0.00023619813717188823, |
|
"loss": 0.7962, |
|
"step": 18100 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"learning_rate": 0.00023513971210838272, |
|
"loss": 0.8086, |
|
"step": 18125 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 0.0002340812870448772, |
|
"loss": 0.7818, |
|
"step": 18150 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"learning_rate": 0.00023302286198137174, |
|
"loss": 0.8205, |
|
"step": 18175 |
|
}, |
|
{ |
|
"epoch": 7.69, |
|
"learning_rate": 0.00023196443691786623, |
|
"loss": 0.8163, |
|
"step": 18200 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 0.00023090601185436072, |
|
"loss": 0.8084, |
|
"step": 18225 |
|
}, |
|
{ |
|
"epoch": 7.71, |
|
"learning_rate": 0.0002298475867908552, |
|
"loss": 0.8133, |
|
"step": 18250 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 0.00022878916172734971, |
|
"loss": 0.8366, |
|
"step": 18275 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 0.0002277307366638442, |
|
"loss": 0.7847, |
|
"step": 18300 |
|
}, |
|
{ |
|
"epoch": 7.74, |
|
"learning_rate": 0.00022667231160033869, |
|
"loss": 0.8318, |
|
"step": 18325 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 0.0002256138865368332, |
|
"loss": 0.8503, |
|
"step": 18350 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"learning_rate": 0.0002245554614733277, |
|
"loss": 0.8147, |
|
"step": 18375 |
|
}, |
|
{ |
|
"epoch": 7.77, |
|
"learning_rate": 0.0002234970364098222, |
|
"loss": 0.8033, |
|
"step": 18400 |
|
}, |
|
{ |
|
"epoch": 7.78, |
|
"learning_rate": 0.00022243861134631668, |
|
"loss": 0.8463, |
|
"step": 18425 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"learning_rate": 0.00022138018628281117, |
|
"loss": 0.7797, |
|
"step": 18450 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"learning_rate": 0.00022032176121930565, |
|
"loss": 0.8037, |
|
"step": 18475 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"learning_rate": 0.0002192633361558002, |
|
"loss": 0.8347, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 0.00021820491109229468, |
|
"loss": 0.8004, |
|
"step": 18525 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"learning_rate": 0.00021714648602878917, |
|
"loss": 0.7951, |
|
"step": 18550 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 0.00021608806096528365, |
|
"loss": 0.8264, |
|
"step": 18575 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 0.00021502963590177816, |
|
"loss": 0.8092, |
|
"step": 18600 |
|
}, |
|
{ |
|
"epoch": 7.87, |
|
"learning_rate": 0.00021397121083827265, |
|
"loss": 0.8175, |
|
"step": 18625 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"learning_rate": 0.00021291278577476714, |
|
"loss": 0.843, |
|
"step": 18650 |
|
}, |
|
{ |
|
"epoch": 7.89, |
|
"learning_rate": 0.00021185436071126165, |
|
"loss": 0.8217, |
|
"step": 18675 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 0.00021079593564775614, |
|
"loss": 0.8412, |
|
"step": 18700 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 0.00020973751058425065, |
|
"loss": 0.8262, |
|
"step": 18725 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"learning_rate": 0.00020867908552074513, |
|
"loss": 0.7942, |
|
"step": 18750 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"learning_rate": 0.00020762066045723962, |
|
"loss": 0.7943, |
|
"step": 18775 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"learning_rate": 0.0002065622353937341, |
|
"loss": 0.8093, |
|
"step": 18800 |
|
}, |
|
{ |
|
"epoch": 7.95, |
|
"learning_rate": 0.00020550381033022865, |
|
"loss": 0.836, |
|
"step": 18825 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"learning_rate": 0.00020444538526672313, |
|
"loss": 0.8339, |
|
"step": 18850 |
|
}, |
|
{ |
|
"epoch": 7.97, |
|
"learning_rate": 0.00020338696020321762, |
|
"loss": 0.8359, |
|
"step": 18875 |
|
}, |
|
{ |
|
"epoch": 7.98, |
|
"learning_rate": 0.0002023285351397121, |
|
"loss": 0.8264, |
|
"step": 18900 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 0.00020127011007620662, |
|
"loss": 0.8115, |
|
"step": 18925 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 1.1491515636444092, |
|
"eval_runtime": 4396.2585, |
|
"eval_samples_per_second": 1.074, |
|
"eval_steps_per_second": 0.134, |
|
"eval_wer": 30.25990850572851, |
|
"step": 18936 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 0.0002002116850127011, |
|
"loss": 0.7965, |
|
"step": 18950 |
|
}, |
|
{ |
|
"epoch": 8.02, |
|
"learning_rate": 0.00019915325994919561, |
|
"loss": 0.7948, |
|
"step": 18975 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"learning_rate": 0.0001980948348856901, |
|
"loss": 0.7899, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 0.00019703640982218459, |
|
"loss": 0.8032, |
|
"step": 19025 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"learning_rate": 0.0001959779847586791, |
|
"loss": 0.8157, |
|
"step": 19050 |
|
}, |
|
{ |
|
"epoch": 8.06, |
|
"learning_rate": 0.00019491955969517358, |
|
"loss": 0.8071, |
|
"step": 19075 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"learning_rate": 0.00019386113463166807, |
|
"loss": 0.8087, |
|
"step": 19100 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"learning_rate": 0.00019280270956816256, |
|
"loss": 0.7975, |
|
"step": 19125 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 0.0001917866215071973, |
|
"loss": 0.772, |
|
"step": 19150 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 0.0001907281964436918, |
|
"loss": 0.8019, |
|
"step": 19175 |
|
}, |
|
{ |
|
"epoch": 8.11, |
|
"learning_rate": 0.0001896697713801863, |
|
"loss": 0.7898, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"learning_rate": 0.00018861134631668078, |
|
"loss": 0.7724, |
|
"step": 19225 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"learning_rate": 0.00018755292125317526, |
|
"loss": 0.7709, |
|
"step": 19250 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 0.00018649449618966978, |
|
"loss": 0.7686, |
|
"step": 19275 |
|
}, |
|
{ |
|
"epoch": 8.15, |
|
"learning_rate": 0.0001854360711261643, |
|
"loss": 0.8047, |
|
"step": 19300 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"learning_rate": 0.00018437764606265877, |
|
"loss": 0.7813, |
|
"step": 19325 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"learning_rate": 0.00018331922099915326, |
|
"loss": 0.7989, |
|
"step": 19350 |
|
}, |
|
{ |
|
"epoch": 8.19, |
|
"learning_rate": 0.00018226079593564777, |
|
"loss": 0.7918, |
|
"step": 19375 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 0.00018120237087214226, |
|
"loss": 0.8023, |
|
"step": 19400 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"learning_rate": 0.00018014394580863675, |
|
"loss": 0.8119, |
|
"step": 19425 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 0.00017908552074513123, |
|
"loss": 0.7831, |
|
"step": 19450 |
|
}, |
|
{ |
|
"epoch": 8.23, |
|
"learning_rate": 0.00017802709568162574, |
|
"loss": 0.8159, |
|
"step": 19475 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"learning_rate": 0.00017696867061812026, |
|
"loss": 0.8149, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 0.00017591024555461474, |
|
"loss": 0.7954, |
|
"step": 19525 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"learning_rate": 0.00017485182049110923, |
|
"loss": 0.7951, |
|
"step": 19550 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 0.00017379339542760371, |
|
"loss": 0.8232, |
|
"step": 19575 |
|
}, |
|
{ |
|
"epoch": 8.28, |
|
"learning_rate": 0.00017273497036409823, |
|
"loss": 0.8129, |
|
"step": 19600 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"learning_rate": 0.00017167654530059274, |
|
"loss": 0.8182, |
|
"step": 19625 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 0.00017061812023708723, |
|
"loss": 0.7852, |
|
"step": 19650 |
|
}, |
|
{ |
|
"epoch": 8.31, |
|
"learning_rate": 0.0001695596951735817, |
|
"loss": 0.787, |
|
"step": 19675 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"learning_rate": 0.0001685012701100762, |
|
"loss": 0.8176, |
|
"step": 19700 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 0.0001674428450465707, |
|
"loss": 0.7965, |
|
"step": 19725 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"learning_rate": 0.0001663844199830652, |
|
"loss": 0.7661, |
|
"step": 19750 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 0.00016532599491955968, |
|
"loss": 0.8, |
|
"step": 19775 |
|
}, |
|
{ |
|
"epoch": 8.37, |
|
"learning_rate": 0.0001642675698560542, |
|
"loss": 0.7841, |
|
"step": 19800 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"learning_rate": 0.0001632091447925487, |
|
"loss": 0.7963, |
|
"step": 19825 |
|
}, |
|
{ |
|
"epoch": 8.39, |
|
"learning_rate": 0.0001621507197290432, |
|
"loss": 0.7871, |
|
"step": 19850 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 0.00016109229466553768, |
|
"loss": 0.7896, |
|
"step": 19875 |
|
}, |
|
{ |
|
"epoch": 8.41, |
|
"learning_rate": 0.00016003386960203217, |
|
"loss": 0.785, |
|
"step": 19900 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"learning_rate": 0.00015897544453852668, |
|
"loss": 0.8418, |
|
"step": 19925 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"learning_rate": 0.0001579170194750212, |
|
"loss": 0.7504, |
|
"step": 19950 |
|
}, |
|
{ |
|
"epoch": 8.44, |
|
"learning_rate": 0.00015685859441151568, |
|
"loss": 0.8043, |
|
"step": 19975 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 0.00015580016934801016, |
|
"loss": 0.7874, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 8.46, |
|
"learning_rate": 0.00015474174428450465, |
|
"loss": 0.7888, |
|
"step": 20025 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"learning_rate": 0.00015368331922099916, |
|
"loss": 0.7702, |
|
"step": 20050 |
|
}, |
|
{ |
|
"epoch": 8.48, |
|
"learning_rate": 0.00015262489415749365, |
|
"loss": 0.8029, |
|
"step": 20075 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"learning_rate": 0.00015156646909398813, |
|
"loss": 0.772, |
|
"step": 20100 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 0.00015050804403048265, |
|
"loss": 0.7777, |
|
"step": 20125 |
|
}, |
|
{ |
|
"epoch": 8.51, |
|
"learning_rate": 0.00014944961896697716, |
|
"loss": 0.7935, |
|
"step": 20150 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"learning_rate": 0.00014839119390347164, |
|
"loss": 0.8093, |
|
"step": 20175 |
|
}, |
|
{ |
|
"epoch": 8.53, |
|
"learning_rate": 0.00014733276883996613, |
|
"loss": 0.7686, |
|
"step": 20200 |
|
}, |
|
{ |
|
"epoch": 8.54, |
|
"learning_rate": 0.00014627434377646062, |
|
"loss": 0.8484, |
|
"step": 20225 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"learning_rate": 0.0001452159187129551, |
|
"loss": 0.8237, |
|
"step": 20250 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"learning_rate": 0.00014415749364944964, |
|
"loss": 0.7896, |
|
"step": 20275 |
|
}, |
|
{ |
|
"epoch": 8.58, |
|
"learning_rate": 0.00014309906858594413, |
|
"loss": 0.8252, |
|
"step": 20300 |
|
}, |
|
{ |
|
"epoch": 8.59, |
|
"learning_rate": 0.0001420406435224386, |
|
"loss": 0.8128, |
|
"step": 20325 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 0.0001409822184589331, |
|
"loss": 0.8201, |
|
"step": 20350 |
|
}, |
|
{ |
|
"epoch": 8.61, |
|
"learning_rate": 0.0001399237933954276, |
|
"loss": 0.7848, |
|
"step": 20375 |
|
}, |
|
{ |
|
"epoch": 8.62, |
|
"learning_rate": 0.0001388653683319221, |
|
"loss": 0.8147, |
|
"step": 20400 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"learning_rate": 0.0001378069432684166, |
|
"loss": 0.7742, |
|
"step": 20425 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"learning_rate": 0.0001367485182049111, |
|
"loss": 0.7973, |
|
"step": 20450 |
|
}, |
|
{ |
|
"epoch": 8.65, |
|
"learning_rate": 0.0001356900931414056, |
|
"loss": 0.7897, |
|
"step": 20475 |
|
}, |
|
{ |
|
"epoch": 8.66, |
|
"learning_rate": 0.0001346316680779001, |
|
"loss": 0.8092, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 0.00013357324301439458, |
|
"loss": 0.749, |
|
"step": 20525 |
|
}, |
|
{ |
|
"epoch": 8.68, |
|
"learning_rate": 0.00013251481795088907, |
|
"loss": 0.7846, |
|
"step": 20550 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 0.00013145639288738355, |
|
"loss": 0.8339, |
|
"step": 20575 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 0.0001303979678238781, |
|
"loss": 0.8287, |
|
"step": 20600 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"learning_rate": 0.00012933954276037258, |
|
"loss": 0.8201, |
|
"step": 20625 |
|
}, |
|
{ |
|
"epoch": 8.72, |
|
"learning_rate": 0.00012828111769686706, |
|
"loss": 0.7572, |
|
"step": 20650 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"learning_rate": 0.00012722269263336155, |
|
"loss": 0.806, |
|
"step": 20675 |
|
}, |
|
{ |
|
"epoch": 8.75, |
|
"learning_rate": 0.00012616426756985606, |
|
"loss": 0.7886, |
|
"step": 20700 |
|
}, |
|
{ |
|
"epoch": 8.76, |
|
"learning_rate": 0.00012510584250635055, |
|
"loss": 0.8396, |
|
"step": 20725 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"learning_rate": 0.00012404741744284506, |
|
"loss": 0.7755, |
|
"step": 20750 |
|
}, |
|
{ |
|
"epoch": 8.78, |
|
"learning_rate": 0.00012298899237933955, |
|
"loss": 0.8043, |
|
"step": 20775 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"learning_rate": 0.00012193056731583405, |
|
"loss": 0.7772, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 0.00012087214225232853, |
|
"loss": 0.7739, |
|
"step": 20825 |
|
}, |
|
{ |
|
"epoch": 8.81, |
|
"learning_rate": 0.00011981371718882303, |
|
"loss": 0.7837, |
|
"step": 20850 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 0.00011875529212531753, |
|
"loss": 0.8591, |
|
"step": 20875 |
|
}, |
|
{ |
|
"epoch": 8.83, |
|
"learning_rate": 0.00011769686706181203, |
|
"loss": 0.817, |
|
"step": 20900 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"learning_rate": 0.00011663844199830652, |
|
"loss": 0.8072, |
|
"step": 20925 |
|
}, |
|
{ |
|
"epoch": 8.85, |
|
"learning_rate": 0.00011558001693480103, |
|
"loss": 0.799, |
|
"step": 20950 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"learning_rate": 0.00011452159187129551, |
|
"loss": 0.8034, |
|
"step": 20975 |
|
}, |
|
{ |
|
"epoch": 8.87, |
|
"learning_rate": 0.00011346316680779001, |
|
"loss": 0.7951, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"learning_rate": 0.00011240474174428451, |
|
"loss": 0.7958, |
|
"step": 21025 |
|
}, |
|
{ |
|
"epoch": 8.89, |
|
"learning_rate": 0.000111346316680779, |
|
"loss": 0.7567, |
|
"step": 21050 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 0.0001102878916172735, |
|
"loss": 0.7791, |
|
"step": 21075 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 0.00010922946655376798, |
|
"loss": 0.8189, |
|
"step": 21100 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"learning_rate": 0.0001081710414902625, |
|
"loss": 0.8048, |
|
"step": 21125 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"learning_rate": 0.00010711261642675698, |
|
"loss": 0.7884, |
|
"step": 21150 |
|
}, |
|
{ |
|
"epoch": 8.95, |
|
"learning_rate": 0.00010605419136325148, |
|
"loss": 0.7908, |
|
"step": 21175 |
|
}, |
|
{ |
|
"epoch": 8.96, |
|
"learning_rate": 0.00010499576629974598, |
|
"loss": 0.768, |
|
"step": 21200 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 0.00010393734123624048, |
|
"loss": 0.811, |
|
"step": 21225 |
|
}, |
|
{ |
|
"epoch": 8.98, |
|
"learning_rate": 0.00010287891617273497, |
|
"loss": 0.8204, |
|
"step": 21250 |
|
}, |
|
{ |
|
"epoch": 8.99, |
|
"learning_rate": 0.00010182049110922948, |
|
"loss": 0.7989, |
|
"step": 21275 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 0.00010076206604572397, |
|
"loss": 0.7884, |
|
"step": 21300 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_loss": 1.1450048685073853, |
|
"eval_runtime": 4394.6356, |
|
"eval_samples_per_second": 1.075, |
|
"eval_steps_per_second": 0.134, |
|
"eval_wer": 30.689040929517024, |
|
"step": 21303 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"learning_rate": 9.970364098221845e-05, |
|
"loss": 0.7619, |
|
"step": 21325 |
|
}, |
|
{ |
|
"epoch": 9.02, |
|
"learning_rate": 9.864521591871296e-05, |
|
"loss": 0.7994, |
|
"step": 21350 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"learning_rate": 9.758679085520745e-05, |
|
"loss": 0.7808, |
|
"step": 21375 |
|
}, |
|
{ |
|
"epoch": 9.04, |
|
"learning_rate": 9.652836579170195e-05, |
|
"loss": 0.8, |
|
"step": 21400 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 9.546994072819644e-05, |
|
"loss": 0.7678, |
|
"step": 21425 |
|
}, |
|
{ |
|
"epoch": 9.06, |
|
"learning_rate": 9.441151566469095e-05, |
|
"loss": 0.7871, |
|
"step": 21450 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 9.335309060118543e-05, |
|
"loss": 0.7902, |
|
"step": 21475 |
|
}, |
|
{ |
|
"epoch": 9.08, |
|
"learning_rate": 9.229466553767993e-05, |
|
"loss": 0.7675, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"learning_rate": 9.123624047417443e-05, |
|
"loss": 0.7976, |
|
"step": 21525 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 9.017781541066893e-05, |
|
"loss": 0.7772, |
|
"step": 21550 |
|
}, |
|
{ |
|
"epoch": 9.11, |
|
"learning_rate": 8.911939034716342e-05, |
|
"loss": 0.7614, |
|
"step": 21575 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"learning_rate": 8.806096528365792e-05, |
|
"loss": 0.8134, |
|
"step": 21600 |
|
}, |
|
{ |
|
"epoch": 9.14, |
|
"learning_rate": 8.700254022015242e-05, |
|
"loss": 0.7837, |
|
"step": 21625 |
|
}, |
|
{ |
|
"epoch": 9.15, |
|
"learning_rate": 8.59441151566469e-05, |
|
"loss": 0.7952, |
|
"step": 21650 |
|
}, |
|
{ |
|
"epoch": 9.16, |
|
"learning_rate": 8.488569009314142e-05, |
|
"loss": 0.8178, |
|
"step": 21675 |
|
}, |
|
{ |
|
"epoch": 9.17, |
|
"learning_rate": 8.38272650296359e-05, |
|
"loss": 0.8111, |
|
"step": 21700 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 8.27688399661304e-05, |
|
"loss": 0.7577, |
|
"step": 21725 |
|
}, |
|
{ |
|
"epoch": 9.19, |
|
"learning_rate": 8.17104149026249e-05, |
|
"loss": 0.7944, |
|
"step": 21750 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 8.06519898391194e-05, |
|
"loss": 0.8085, |
|
"step": 21775 |
|
}, |
|
{ |
|
"epoch": 9.21, |
|
"learning_rate": 7.959356477561388e-05, |
|
"loss": 0.8087, |
|
"step": 21800 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 7.853513971210838e-05, |
|
"loss": 0.787, |
|
"step": 21825 |
|
}, |
|
{ |
|
"epoch": 9.23, |
|
"learning_rate": 7.747671464860288e-05, |
|
"loss": 0.7749, |
|
"step": 21850 |
|
}, |
|
{ |
|
"epoch": 9.24, |
|
"learning_rate": 7.641828958509737e-05, |
|
"loss": 0.7469, |
|
"step": 21875 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 7.535986452159187e-05, |
|
"loss": 0.7828, |
|
"step": 21900 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"learning_rate": 7.430143945808637e-05, |
|
"loss": 0.78, |
|
"step": 21925 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"learning_rate": 7.324301439458087e-05, |
|
"loss": 0.7895, |
|
"step": 21950 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"learning_rate": 7.218458933107535e-05, |
|
"loss": 0.8275, |
|
"step": 21975 |
|
}, |
|
{ |
|
"epoch": 9.29, |
|
"learning_rate": 7.112616426756987e-05, |
|
"loss": 0.8019, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 9.31, |
|
"learning_rate": 7.006773920406435e-05, |
|
"loss": 0.7995, |
|
"step": 22025 |
|
}, |
|
{ |
|
"epoch": 9.32, |
|
"learning_rate": 6.900931414055885e-05, |
|
"loss": 0.7857, |
|
"step": 22050 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 6.795088907705335e-05, |
|
"loss": 0.7791, |
|
"step": 22075 |
|
}, |
|
{ |
|
"epoch": 9.34, |
|
"learning_rate": 6.689246401354785e-05, |
|
"loss": 0.7538, |
|
"step": 22100 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 6.583403895004234e-05, |
|
"loss": 0.7916, |
|
"step": 22125 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"learning_rate": 6.477561388653684e-05, |
|
"loss": 0.8062, |
|
"step": 22150 |
|
}, |
|
{ |
|
"epoch": 9.37, |
|
"learning_rate": 6.371718882303133e-05, |
|
"loss": 0.7551, |
|
"step": 22175 |
|
}, |
|
{ |
|
"epoch": 9.38, |
|
"learning_rate": 6.265876375952582e-05, |
|
"loss": 0.7946, |
|
"step": 22200 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"learning_rate": 6.160033869602032e-05, |
|
"loss": 0.8288, |
|
"step": 22225 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 6.054191363251482e-05, |
|
"loss": 0.7838, |
|
"step": 22250 |
|
}, |
|
{ |
|
"epoch": 9.41, |
|
"learning_rate": 5.948348856900931e-05, |
|
"loss": 0.7944, |
|
"step": 22275 |
|
}, |
|
{ |
|
"epoch": 9.42, |
|
"learning_rate": 5.842506350550381e-05, |
|
"loss": 0.7861, |
|
"step": 22300 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"learning_rate": 5.7366638441998303e-05, |
|
"loss": 0.8022, |
|
"step": 22325 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"learning_rate": 5.63082133784928e-05, |
|
"loss": 0.7889, |
|
"step": 22350 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 5.52497883149873e-05, |
|
"loss": 0.7619, |
|
"step": 22375 |
|
}, |
|
{ |
|
"epoch": 9.46, |
|
"learning_rate": 5.4191363251481795e-05, |
|
"loss": 0.7825, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"learning_rate": 5.3132938187976294e-05, |
|
"loss": 0.7598, |
|
"step": 22425 |
|
}, |
|
{ |
|
"epoch": 9.48, |
|
"learning_rate": 5.2074513124470793e-05, |
|
"loss": 0.7659, |
|
"step": 22450 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 5.1016088060965286e-05, |
|
"loss": 0.7651, |
|
"step": 22475 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"learning_rate": 4.9957662997459785e-05, |
|
"loss": 0.7848, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 9.52, |
|
"learning_rate": 4.889923793395427e-05, |
|
"loss": 0.7968, |
|
"step": 22525 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"learning_rate": 4.784081287044877e-05, |
|
"loss": 0.8155, |
|
"step": 22550 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"learning_rate": 4.678238780694327e-05, |
|
"loss": 0.8096, |
|
"step": 22575 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 4.572396274343776e-05, |
|
"loss": 0.7779, |
|
"step": 22600 |
|
}, |
|
{ |
|
"epoch": 9.56, |
|
"learning_rate": 4.466553767993226e-05, |
|
"loss": 0.7743, |
|
"step": 22625 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 4.360711261642676e-05, |
|
"loss": 0.8104, |
|
"step": 22650 |
|
}, |
|
{ |
|
"epoch": 9.58, |
|
"learning_rate": 4.2548687552921254e-05, |
|
"loss": 0.7797, |
|
"step": 22675 |
|
}, |
|
{ |
|
"epoch": 9.59, |
|
"learning_rate": 4.149026248941575e-05, |
|
"loss": 0.7557, |
|
"step": 22700 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 4.0431837425910246e-05, |
|
"loss": 0.8151, |
|
"step": 22725 |
|
}, |
|
{ |
|
"epoch": 9.61, |
|
"learning_rate": 3.9373412362404745e-05, |
|
"loss": 0.8009, |
|
"step": 22750 |
|
}, |
|
{ |
|
"epoch": 9.62, |
|
"learning_rate": 3.8314987298899244e-05, |
|
"loss": 0.7728, |
|
"step": 22775 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"learning_rate": 3.725656223539373e-05, |
|
"loss": 0.7901, |
|
"step": 22800 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"learning_rate": 3.619813717188823e-05, |
|
"loss": 0.8149, |
|
"step": 22825 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 3.513971210838273e-05, |
|
"loss": 0.7773, |
|
"step": 22850 |
|
}, |
|
{ |
|
"epoch": 9.66, |
|
"learning_rate": 3.408128704487722e-05, |
|
"loss": 0.811, |
|
"step": 22875 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 3.302286198137172e-05, |
|
"loss": 0.7869, |
|
"step": 22900 |
|
}, |
|
{ |
|
"epoch": 9.69, |
|
"learning_rate": 3.196443691786621e-05, |
|
"loss": 0.7734, |
|
"step": 22925 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 3.090601185436071e-05, |
|
"loss": 0.7745, |
|
"step": 22950 |
|
}, |
|
{ |
|
"epoch": 9.71, |
|
"learning_rate": 2.984758679085521e-05, |
|
"loss": 0.7964, |
|
"step": 22975 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"learning_rate": 2.87891617273497e-05, |
|
"loss": 0.8091, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"learning_rate": 2.77307366638442e-05, |
|
"loss": 0.7633, |
|
"step": 23025 |
|
}, |
|
{ |
|
"epoch": 9.74, |
|
"learning_rate": 2.6672311600338696e-05, |
|
"loss": 0.7806, |
|
"step": 23050 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 2.5613886536833192e-05, |
|
"loss": 0.7932, |
|
"step": 23075 |
|
}, |
|
{ |
|
"epoch": 9.76, |
|
"learning_rate": 2.455546147332769e-05, |
|
"loss": 0.7834, |
|
"step": 23100 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"learning_rate": 2.3497036409822188e-05, |
|
"loss": 0.7714, |
|
"step": 23125 |
|
}, |
|
{ |
|
"epoch": 9.78, |
|
"learning_rate": 2.243861134631668e-05, |
|
"loss": 0.7877, |
|
"step": 23150 |
|
}, |
|
{ |
|
"epoch": 9.79, |
|
"learning_rate": 2.1380186282811176e-05, |
|
"loss": 0.7596, |
|
"step": 23175 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 2.0321761219305672e-05, |
|
"loss": 0.802, |
|
"step": 23200 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"learning_rate": 1.926333615580017e-05, |
|
"loss": 0.7799, |
|
"step": 23225 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 1.8204911092294667e-05, |
|
"loss": 0.801, |
|
"step": 23250 |
|
}, |
|
{ |
|
"epoch": 9.83, |
|
"learning_rate": 1.714648602878916e-05, |
|
"loss": 0.7705, |
|
"step": 23275 |
|
}, |
|
{ |
|
"epoch": 9.84, |
|
"learning_rate": 1.6088060965283656e-05, |
|
"loss": 0.7636, |
|
"step": 23300 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"learning_rate": 1.5029635901778154e-05, |
|
"loss": 0.8273, |
|
"step": 23325 |
|
}, |
|
{ |
|
"epoch": 9.86, |
|
"learning_rate": 1.3971210838272651e-05, |
|
"loss": 0.7904, |
|
"step": 23350 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 1.2912785774767146e-05, |
|
"loss": 0.7859, |
|
"step": 23375 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"learning_rate": 1.1854360711261643e-05, |
|
"loss": 0.8122, |
|
"step": 23400 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 1.079593564775614e-05, |
|
"loss": 0.7548, |
|
"step": 23425 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 9.737510584250635e-06, |
|
"loss": 0.8049, |
|
"step": 23450 |
|
}, |
|
{ |
|
"epoch": 9.92, |
|
"learning_rate": 8.679085520745131e-06, |
|
"loss": 0.8015, |
|
"step": 23475 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"learning_rate": 7.620660457239628e-06, |
|
"loss": 0.8026, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 9.94, |
|
"learning_rate": 6.562235393734124e-06, |
|
"loss": 0.7563, |
|
"step": 23525 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"learning_rate": 5.50381033022862e-06, |
|
"loss": 0.772, |
|
"step": 23550 |
|
}, |
|
{ |
|
"epoch": 9.96, |
|
"learning_rate": 4.445385266723117e-06, |
|
"loss": 0.8002, |
|
"step": 23575 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"learning_rate": 3.386960203217612e-06, |
|
"loss": 0.7878, |
|
"step": 23600 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"learning_rate": 2.328535139712108e-06, |
|
"loss": 0.767, |
|
"step": 23625 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"learning_rate": 1.2701100762066045e-06, |
|
"loss": 0.802, |
|
"step": 23650 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_loss": 1.1429458856582642, |
|
"eval_runtime": 4707.3076, |
|
"eval_samples_per_second": 1.003, |
|
"eval_steps_per_second": 0.126, |
|
"eval_wer": 26.11837577426015, |
|
"step": 23670 |
|
} |
|
], |
|
"logging_steps": 25, |
|
"max_steps": 23670, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 500, |
|
"total_flos": 2.034797628063744e+20, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |