{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 4.9992034411343, |
|
"global_step": 3920, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 3.7499999999999997e-06, |
|
"loss": 12.5263, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.05e-05, |
|
"loss": 9.3683, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.7999999999999997e-05, |
|
"loss": 6.2513, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.55e-05, |
|
"loss": 5.2618, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3.2999999999999996e-05, |
|
"loss": 4.6846, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 4.05e-05, |
|
"loss": 5.3931, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.7999999999999994e-05, |
|
"loss": 4.5171, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 5.5499999999999994e-05, |
|
"loss": 4.2914, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 6.299999999999999e-05, |
|
"loss": 4.0871, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.049999999999999e-05, |
|
"loss": 3.7123, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 7.8e-05, |
|
"loss": 4.4868, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.549999999999999e-05, |
|
"loss": 3.8837, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.3e-05, |
|
"loss": 3.7156, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0001005, |
|
"loss": 3.6339, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00010799999999999998, |
|
"loss": 3.1646, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00011549999999999999, |
|
"loss": 4.1434, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00012299999999999998, |
|
"loss": 3.612, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0001305, |
|
"loss": 3.5163, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000138, |
|
"loss": 3.4249, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00014549999999999999, |
|
"loss": 2.9259, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00015299999999999998, |
|
"loss": 3.9771, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001605, |
|
"loss": 3.4041, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000168, |
|
"loss": 3.3504, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00017549999999999998, |
|
"loss": 3.2431, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00018299999999999998, |
|
"loss": 2.674, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001905, |
|
"loss": 3.6828, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000198, |
|
"loss": 3.3201, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0002055, |
|
"loss": 3.1553, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00021299999999999997, |
|
"loss": 3.1109, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00022049999999999997, |
|
"loss": 2.5012, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00022799999999999999, |
|
"loss": 3.6064, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00023549999999999998, |
|
"loss": 3.2355, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000243, |
|
"loss": 3.1318, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00025049999999999996, |
|
"loss": 3.0418, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000258, |
|
"loss": 2.3601, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0002655, |
|
"loss": 3.6744, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00027299999999999997, |
|
"loss": 3.1284, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0002805, |
|
"loss": 3.0404, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00028799999999999995, |
|
"loss": 2.8827, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00029549999999999997, |
|
"loss": 2.3036, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_loss": 4.219922065734863, |
|
"eval_runtime": 978.7091, |
|
"eval_samples_per_second": 6.369, |
|
"eval_steps_per_second": 0.797, |
|
"eval_wer": 0.9499730488280888, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0002996590909090909, |
|
"loss": 3.7398, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0002988068181818181, |
|
"loss": 3.1013, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0002979545454545454, |
|
"loss": 2.9651, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0002971022727272727, |
|
"loss": 2.8712, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00029624999999999996, |
|
"loss": 2.1188, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00029539772727272726, |
|
"loss": 3.7197, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0002945454545454545, |
|
"loss": 3.0581, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0002936931818181818, |
|
"loss": 2.9541, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0002928409090909091, |
|
"loss": 2.7738, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00029198863636363634, |
|
"loss": 2.1907, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0002911363636363636, |
|
"loss": 3.617, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0002902840909090909, |
|
"loss": 2.9914, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00028943181818181813, |
|
"loss": 2.9248, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0002885795454545454, |
|
"loss": 2.7295, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0002877272727272727, |
|
"loss": 2.1081, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00028687499999999997, |
|
"loss": 3.4299, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0002860227272727272, |
|
"loss": 2.917, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0002851704545454545, |
|
"loss": 2.8774, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0002843181818181818, |
|
"loss": 2.7049, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0002834659090909091, |
|
"loss": 2.0966, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00028261363636363635, |
|
"loss": 3.381, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0002817613636363636, |
|
"loss": 2.9034, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0002809090909090909, |
|
"loss": 2.8067, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00028005681818181813, |
|
"loss": 2.6366, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00027920454545454543, |
|
"loss": 2.057, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002783522727272727, |
|
"loss": 3.3471, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00027749999999999997, |
|
"loss": 2.895, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002766477272727272, |
|
"loss": 2.7621, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0002757954545454545, |
|
"loss": 2.6106, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0002749431818181818, |
|
"loss": 1.965, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0002740909090909091, |
|
"loss": 3.4336, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00027323863636363635, |
|
"loss": 2.9011, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0002723863636363636, |
|
"loss": 2.7347, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0002715340909090909, |
|
"loss": 2.6067, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00027068181818181813, |
|
"loss": 2.0804, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00026982954545454543, |
|
"loss": 3.5563, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00026897727272727273, |
|
"loss": 2.8215, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00026812499999999997, |
|
"loss": 2.5426, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0002672727272727272, |
|
"loss": 2.848, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0002664204545454545, |
|
"loss": 2.836, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"eval_loss": 2.7747511863708496, |
|
"eval_runtime": 1386.8434, |
|
"eval_samples_per_second": 4.494, |
|
"eval_steps_per_second": 0.562, |
|
"eval_wer": 1.2757569561904054, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.0002655681818181818, |
|
"loss": 2.6687, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0002647159090909091, |
|
"loss": 2.5294, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00026386363636363635, |
|
"loss": 2.2344, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.0002630113636363636, |
|
"loss": 2.8092, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0002621590909090909, |
|
"loss": 2.8033, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00026130681818181814, |
|
"loss": 2.6581, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00026045454545454543, |
|
"loss": 2.5385, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00025960227272727273, |
|
"loss": 2.2285, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00025875, |
|
"loss": 2.7362, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0002578977272727272, |
|
"loss": 2.7753, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.0002570454545454545, |
|
"loss": 2.6267, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.0002561931818181818, |
|
"loss": 2.4796, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.0002553409090909091, |
|
"loss": 2.1629, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00025448863636363635, |
|
"loss": 2.7308, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0002536363636363636, |
|
"loss": 2.8126, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.0002527840909090909, |
|
"loss": 2.6516, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.00025193181818181814, |
|
"loss": 2.4509, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00025107954545454544, |
|
"loss": 2.1525, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00025022727272727273, |
|
"loss": 2.6477, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.000249375, |
|
"loss": 2.7505, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0002485227272727272, |
|
"loss": 2.629, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.0002476704545454545, |
|
"loss": 2.4232, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0002468181818181818, |
|
"loss": 2.0637, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.0002459659090909091, |
|
"loss": 2.6852, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00024511363636363636, |
|
"loss": 2.753, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.0002442613636363636, |
|
"loss": 2.5709, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.0002434090909090909, |
|
"loss": 2.4777, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00024255681818181814, |
|
"loss": 2.1415, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00024170454545454544, |
|
"loss": 2.5522, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0002408522727272727, |
|
"loss": 2.6903, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 2.5554, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.00023914772727272725, |
|
"loss": 2.4458, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.00023829545454545452, |
|
"loss": 2.1449, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.00023744318181818182, |
|
"loss": 2.6099, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.0002365909090909091, |
|
"loss": 2.6866, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.00023573863636363633, |
|
"loss": 2.5955, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.00023488636363636363, |
|
"loss": 2.4689, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0002340340909090909, |
|
"loss": 2.0902, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.00023318181818181814, |
|
"loss": 2.4791, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.00023232954545454544, |
|
"loss": 2.7021, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"eval_loss": 2.621112585067749, |
|
"eval_runtime": 1372.4396, |
|
"eval_samples_per_second": 4.542, |
|
"eval_steps_per_second": 0.568, |
|
"eval_wer": 1.1724967937398934, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.0002314772727272727, |
|
"loss": 2.5888, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.00023062499999999998, |
|
"loss": 2.4004, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.00022977272727272725, |
|
"loss": 2.0814, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.00022892045454545452, |
|
"loss": 2.5302, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0002280681818181818, |
|
"loss": 2.6644, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.0002272159090909091, |
|
"loss": 2.5413, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.00022636363636363633, |
|
"loss": 2.3677, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.0002255113636363636, |
|
"loss": 2.0107, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.0002246590909090909, |
|
"loss": 2.3977, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.00022380681818181815, |
|
"loss": 2.6459, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.00022295454545454544, |
|
"loss": 2.5077, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.0002221022727272727, |
|
"loss": 2.3272, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.00022124999999999998, |
|
"loss": 1.9862, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.00022039772727272725, |
|
"loss": 2.3923, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.00021954545454545452, |
|
"loss": 2.6402, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.0002186931818181818, |
|
"loss": 2.4859, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.0002178409090909091, |
|
"loss": 2.3107, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.00021698863636363634, |
|
"loss": 2.0173, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.0002161363636363636, |
|
"loss": 2.3328, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.0002152840909090909, |
|
"loss": 2.5905, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.00021443181818181815, |
|
"loss": 2.4717, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00021357954545454542, |
|
"loss": 2.39, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.00021272727272727272, |
|
"loss": 1.9524, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.000211875, |
|
"loss": 2.365, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.00021102272727272723, |
|
"loss": 2.558, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.00021017045454545453, |
|
"loss": 2.4819, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.0002093181818181818, |
|
"loss": 2.3133, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0002084659090909091, |
|
"loss": 1.9815, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 0.00020761363636363634, |
|
"loss": 2.4231, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.0002067613636363636, |
|
"loss": 2.6569, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.0002059090909090909, |
|
"loss": 2.4576, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.00020505681818181815, |
|
"loss": 2.3225, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.00020420454545454542, |
|
"loss": 2.0289, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.00020335227272727272, |
|
"loss": 2.3312, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.0002025, |
|
"loss": 2.5694, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.00020164772727272723, |
|
"loss": 2.4322, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.00020079545454545453, |
|
"loss": 2.1472, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.0001999431818181818, |
|
"loss": 2.6277, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 0.0001990909090909091, |
|
"loss": 2.3966, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.00019823863636363634, |
|
"loss": 2.2322, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"eval_loss": 2.5714354515075684, |
|
"eval_runtime": 1370.556, |
|
"eval_samples_per_second": 4.548, |
|
"eval_steps_per_second": 0.569, |
|
"eval_wer": 1.1041616326834074, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.0001973863636363636, |
|
"loss": 1.9566, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.0001965340909090909, |
|
"loss": 1.7948, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 0.00019568181818181815, |
|
"loss": 2.6895, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.00019482954545454542, |
|
"loss": 2.3899, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.00019397727272727272, |
|
"loss": 2.2251, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.000193125, |
|
"loss": 1.9633, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 0.00019227272727272723, |
|
"loss": 1.7536, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.00019142045454545453, |
|
"loss": 2.684, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 0.0001905681818181818, |
|
"loss": 2.448, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.0001897159090909091, |
|
"loss": 2.2632, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 0.00018886363636363634, |
|
"loss": 2.0362, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.00018801136363636361, |
|
"loss": 1.8685, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 0.0001871590909090909, |
|
"loss": 2.6846, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.00018630681818181816, |
|
"loss": 2.3884, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 0.00018545454545454543, |
|
"loss": 2.337, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.00018460227272727272, |
|
"loss": 1.965, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 0.00018375, |
|
"loss": 1.7979, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.00018289772727272724, |
|
"loss": 2.6436, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 0.00018204545454545454, |
|
"loss": 2.4121, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 0.0001811931818181818, |
|
"loss": 2.2591, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 0.0001803409090909091, |
|
"loss": 2.0358, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.00017948863636363635, |
|
"loss": 1.8178, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 0.00017863636363636362, |
|
"loss": 2.6027, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 0.00017778409090909091, |
|
"loss": 2.3807, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 0.00017693181818181816, |
|
"loss": 2.2269, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.00017607954545454543, |
|
"loss": 1.9806, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 0.00017522727272727273, |
|
"loss": 1.7612, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.000174375, |
|
"loss": 2.5986, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 0.00017352272727272724, |
|
"loss": 2.3444, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.00017267045454545454, |
|
"loss": 2.1959, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 0.0001718181818181818, |
|
"loss": 1.8388, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 0.0001709659090909091, |
|
"loss": 1.7236, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 0.00017011363636363635, |
|
"loss": 2.6009, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 0.00016926136363636362, |
|
"loss": 2.3328, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 0.00016840909090909092, |
|
"loss": 2.134, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 0.00016755681818181816, |
|
"loss": 1.8768, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 0.00016670454545454543, |
|
"loss": 1.6798, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 0.00016585227272727273, |
|
"loss": 2.5955, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 0.000165, |
|
"loss": 2.3165, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 0.00016414772727272724, |
|
"loss": 2.1435, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"eval_loss": 2.4204635620117188, |
|
"eval_runtime": 1359.7511, |
|
"eval_samples_per_second": 4.584, |
|
"eval_steps_per_second": 0.574, |
|
"eval_wer": 1.0092656270329547, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 0.00016329545454545454, |
|
"loss": 1.8971, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 0.0001624431818181818, |
|
"loss": 1.6731, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 0.00016159090909090908, |
|
"loss": 2.6731, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 0.00016073863636363635, |
|
"loss": 2.3067, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 0.00015988636363636362, |
|
"loss": 2.1082, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 0.0001590340909090909, |
|
"loss": 1.849, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 0.00015818181818181816, |
|
"loss": 1.6493, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 0.00015732954545454543, |
|
"loss": 2.6306, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 0.00015647727272727273, |
|
"loss": 2.269, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 0.000155625, |
|
"loss": 2.1135, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 0.00015477272727272725, |
|
"loss": 1.8049, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 0.00015392045454545454, |
|
"loss": 1.695, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 0.0001530681818181818, |
|
"loss": 2.5111, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 0.00015221590909090908, |
|
"loss": 2.3195, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 0.00015136363636363635, |
|
"loss": 2.077, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 0.00015051136363636362, |
|
"loss": 1.8057, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 0.0001496590909090909, |
|
"loss": 1.5783, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 0.00014880681818181817, |
|
"loss": 2.5461, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 0.00014795454545454544, |
|
"loss": 2.2407, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 0.0001471022727272727, |
|
"loss": 2.0751, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 0.00014624999999999998, |
|
"loss": 1.6996, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 0.00014539772727272727, |
|
"loss": 1.6127, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 0.00014454545454545452, |
|
"loss": 2.4968, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 0.00014369318181818182, |
|
"loss": 2.213, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 0.00014284090909090909, |
|
"loss": 2.0047, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 0.00014198863636363636, |
|
"loss": 1.7818, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 0.00014113636363636363, |
|
"loss": 1.435, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 0.0001402840909090909, |
|
"loss": 2.5173, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 0.00013943181818181817, |
|
"loss": 2.2263, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 0.00013857954545454544, |
|
"loss": 2.0696, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 0.0001377272727272727, |
|
"loss": 1.7433, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 0.00013687499999999998, |
|
"loss": 1.6106, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 0.00013602272727272728, |
|
"loss": 2.5292, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 0.00013517045454545452, |
|
"loss": 2.0994, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 0.00013431818181818182, |
|
"loss": 1.6276, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 0.0001334659090909091, |
|
"loss": 2.3773, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 0.00013261363636363633, |
|
"loss": 2.1608, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 0.00013176136363636363, |
|
"loss": 1.8948, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 0.0001309090909090909, |
|
"loss": 1.642, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 0.00013005681818181817, |
|
"loss": 1.1205, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"eval_loss": 2.7611794471740723, |
|
"eval_runtime": 1138.461, |
|
"eval_samples_per_second": 5.475, |
|
"eval_steps_per_second": 0.685, |
|
"eval_wer": 0.909806509172692, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 0.00012920454545454544, |
|
"loss": 2.3052, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 0.0001283522727272727, |
|
"loss": 2.1374, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 0.00012749999999999998, |
|
"loss": 1.8912, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 0.00012664772727272728, |
|
"loss": 1.5719, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 0.00012579545454545452, |
|
"loss": 1.1514, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 0.00012494318181818182, |
|
"loss": 2.3621, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 0.0001240909090909091, |
|
"loss": 2.1977, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 0.00012323863636363633, |
|
"loss": 1.8366, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 0.00012238636363636363, |
|
"loss": 1.5233, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 0.00012153409090909089, |
|
"loss": 1.0517, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 0.00012068181818181817, |
|
"loss": 2.3243, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 0.00011982954545454544, |
|
"loss": 2.0906, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 0.00011897727272727273, |
|
"loss": 1.8305, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 0.00011812499999999998, |
|
"loss": 1.4674, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 0.00011727272727272727, |
|
"loss": 1.0586, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 0.00011642045454545454, |
|
"loss": 2.2792, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.00011556818181818181, |
|
"loss": 2.0311, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 0.00011471590909090908, |
|
"loss": 1.7596, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.00011386363636363635, |
|
"loss": 1.4542, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 0.00011301136363636362, |
|
"loss": 1.0427, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.00011215909090909089, |
|
"loss": 2.2565, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 0.00011130681818181818, |
|
"loss": 1.9995, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.00011045454545454545, |
|
"loss": 1.696, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 0.00010960227272727272, |
|
"loss": 1.3768, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.00010874999999999999, |
|
"loss": 0.9125, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 0.00010789772727272727, |
|
"loss": 2.1988, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 0.00010704545454545453, |
|
"loss": 1.9406, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.00010619318181818181, |
|
"loss": 1.6374, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.00010534090909090908, |
|
"loss": 1.3302, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 0.00010448863636363635, |
|
"loss": 0.925, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 0.00010363636363636362, |
|
"loss": 2.1624, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.0001027840909090909, |
|
"loss": 1.8209, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 0.00010193181818181818, |
|
"loss": 1.4644, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.00010107954545454544, |
|
"loss": 1.2247, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 0.00010022727272727272, |
|
"loss": 0.8943, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 9.937499999999999e-05, |
|
"loss": 2.1089, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 9.852272727272727e-05, |
|
"loss": 1.7865, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 9.767045454545453e-05, |
|
"loss": 1.4291, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 9.681818181818181e-05, |
|
"loss": 1.1092, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 9.596590909090909e-05, |
|
"loss": 0.7821, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"eval_loss": 2.290031671524048, |
|
"eval_runtime": 1222.3047, |
|
"eval_samples_per_second": 5.099, |
|
"eval_steps_per_second": 0.638, |
|
"eval_wer": 0.8477723462389175, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 9.511363636363634e-05, |
|
"loss": 2.0107, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 9.426136363636363e-05, |
|
"loss": 1.6357, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 9.34090909090909e-05, |
|
"loss": 1.278, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 9.255681818181818e-05, |
|
"loss": 1.0386, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 9.170454545454544e-05, |
|
"loss": 0.8257, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 9.085227272727272e-05, |
|
"loss": 1.9624, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 1.531, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 8.914772727272728e-05, |
|
"loss": 1.1868, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 8.829545454545453e-05, |
|
"loss": 0.9335, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 8.744318181818182e-05, |
|
"loss": 0.7307, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 8.659090909090909e-05, |
|
"loss": 1.8045, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 8.573863636363634e-05, |
|
"loss": 1.3896, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 8.488636363636363e-05, |
|
"loss": 1.1009, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 8.40340909090909e-05, |
|
"loss": 0.8753, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 8.318181818181818e-05, |
|
"loss": 0.7443, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 8.232954545454544e-05, |
|
"loss": 1.6411, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 8.147727272727272e-05, |
|
"loss": 1.2826, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 8.0625e-05, |
|
"loss": 0.9912, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 7.977272727272727e-05, |
|
"loss": 0.8302, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 7.892045454545454e-05, |
|
"loss": 0.732, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 7.806818181818182e-05, |
|
"loss": 1.4946, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 7.721590909090909e-05, |
|
"loss": 1.1615, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 7.636363636363635e-05, |
|
"loss": 0.935, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 7.551136363636363e-05, |
|
"loss": 0.7748, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 7.46590909090909e-05, |
|
"loss": 0.6236, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 7.380681818181817e-05, |
|
"loss": 1.4838, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 7.295454545454546e-05, |
|
"loss": 1.0713, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 7.210227272727271e-05, |
|
"loss": 0.8608, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 7.125e-05, |
|
"loss": 0.7445, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 7.039772727272727e-05, |
|
"loss": 0.6349, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 6.954545454545454e-05, |
|
"loss": 1.3051, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 6.869318181818181e-05, |
|
"loss": 0.9718, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 6.784090909090908e-05, |
|
"loss": 0.7319, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 6.698863636363636e-05, |
|
"loss": 1.0037, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 6.613636363636363e-05, |
|
"loss": 0.9463, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 6.52840909090909e-05, |
|
"loss": 0.7057, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 6.443181818181817e-05, |
|
"loss": 0.5883, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 6.357954545454546e-05, |
|
"loss": 0.5031, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 6.272727272727272e-05, |
|
"loss": 0.9323, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 6.187499999999999e-05, |
|
"loss": 1.0018, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"eval_loss": 0.7969938516616821, |
|
"eval_runtime": 1370.1693, |
|
"eval_samples_per_second": 4.549, |
|
"eval_steps_per_second": 0.569, |
|
"eval_wer": 0.6274604561253508, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 6.102272727272726e-05, |
|
"loss": 0.7135, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 6.017045454545454e-05, |
|
"loss": 0.5946, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 5.931818181818181e-05, |
|
"loss": 0.4899, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 5.846590909090909e-05, |
|
"loss": 0.8917, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 5.761363636363636e-05, |
|
"loss": 0.9219, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 5.676136363636363e-05, |
|
"loss": 0.6885, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 5.590909090909091e-05, |
|
"loss": 0.5763, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 5.505681818181818e-05, |
|
"loss": 0.5006, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 5.4204545454545454e-05, |
|
"loss": 0.7933, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 5.335227272727272e-05, |
|
"loss": 0.8369, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 5.2499999999999995e-05, |
|
"loss": 0.6639, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 5.1647727272727266e-05, |
|
"loss": 0.5441, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 5.0795454545454536e-05, |
|
"loss": 0.4834, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 4.9943181818181814e-05, |
|
"loss": 0.7445, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 4.9090909090909084e-05, |
|
"loss": 0.784, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 4.823863636363636e-05, |
|
"loss": 0.6199, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.738636363636363e-05, |
|
"loss": 0.5361, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 4.653409090909091e-05, |
|
"loss": 0.46, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 4.568181818181818e-05, |
|
"loss": 0.7264, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 4.482954545454546e-05, |
|
"loss": 0.7638, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 4.397727272727272e-05, |
|
"loss": 0.601, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 4.312499999999999e-05, |
|
"loss": 0.5139, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 4.227272727272727e-05, |
|
"loss": 0.4643, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 4.142045454545454e-05, |
|
"loss": 0.6763, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 4.0568181818181816e-05, |
|
"loss": 0.7152, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 3.971590909090909e-05, |
|
"loss": 0.5839, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 3.8863636363636364e-05, |
|
"loss": 0.5277, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 3.8011363636363634e-05, |
|
"loss": 0.4187, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 3.7159090909090905e-05, |
|
"loss": 0.6736, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 3.6306818181818175e-05, |
|
"loss": 0.6799, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 3.545454545454545e-05, |
|
"loss": 0.5369, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 3.460227272727272e-05, |
|
"loss": 0.4819, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 3.375e-05, |
|
"loss": 0.4336, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 3.289772727272727e-05, |
|
"loss": 0.6435, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 3.204545454545454e-05, |
|
"loss": 0.6614, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 3.119318181818181e-05, |
|
"loss": 0.5409, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 3.034090909090909e-05, |
|
"loss": 0.4494, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 2.9488636363636363e-05, |
|
"loss": 0.428, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 2.8636363636363634e-05, |
|
"loss": 0.6517, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 2.7784090909090907e-05, |
|
"loss": 0.6312, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"eval_loss": 0.5575253963470459, |
|
"eval_runtime": 1367.3048, |
|
"eval_samples_per_second": 4.559, |
|
"eval_steps_per_second": 0.57, |
|
"eval_wer": 0.5218769167859334, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 2.6931818181818178e-05, |
|
"loss": 0.5375, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 2.6079545454545452e-05, |
|
"loss": 0.4596, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.5227272727272726e-05, |
|
"loss": 0.3776, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 2.4375e-05, |
|
"loss": 0.611, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 2.3522727272727273e-05, |
|
"loss": 0.6419, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 2.267045454545454e-05, |
|
"loss": 0.5316, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 2.1818181818181814e-05, |
|
"loss": 0.4529, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 2.0965909090909088e-05, |
|
"loss": 0.4011, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 2.0113636363636362e-05, |
|
"loss": 0.5759, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 1.9261363636363636e-05, |
|
"loss": 0.5917, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 1.8409090909090907e-05, |
|
"loss": 0.5131, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 1.755681818181818e-05, |
|
"loss": 0.4384, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 1.670454545454545e-05, |
|
"loss": 0.386, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 1.5852272727272725e-05, |
|
"loss": 0.5591, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 1.4999999999999999e-05, |
|
"loss": 0.6046, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 1.4147727272727271e-05, |
|
"loss": 0.4815, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 1.3295454545454545e-05, |
|
"loss": 0.4244, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 1.2443181818181819e-05, |
|
"loss": 0.3968, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 1.1590909090909089e-05, |
|
"loss": 0.5641, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 1.0738636363636363e-05, |
|
"loss": 0.5865, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 9.886363636363635e-06, |
|
"loss": 0.4954, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 9.034090909090907e-06, |
|
"loss": 0.4475, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 8.181818181818181e-06, |
|
"loss": 0.3937, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 7.329545454545454e-06, |
|
"loss": 0.5145, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 6.4772727272727265e-06, |
|
"loss": 0.5883, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 5.6249999999999995e-06, |
|
"loss": 0.4964, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 4.772727272727272e-06, |
|
"loss": 0.4294, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 3.920454545454545e-06, |
|
"loss": 0.3601, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 3.0681818181818178e-06, |
|
"loss": 0.5155, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 2.215909090909091e-06, |
|
"loss": 0.5326, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 1.3636363636363634e-06, |
|
"loss": 0.4416, |
|
"step": 3910 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5.113636363636363e-07, |
|
"loss": 0.3953, |
|
"step": 3920 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"step": 3920, |
|
"total_flos": 0.0, |
|
"train_loss": 2.0088163537030317, |
|
"train_runtime": 35632.9758, |
|
"train_samples_per_second": 7.045, |
|
"train_steps_per_second": 0.11 |
|
} |
|
], |
|
"max_steps": 3920, |
|
"num_train_epochs": 5, |
|
"total_flos": 0.0, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|