{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 99.9957805907173,
  "global_step": 11800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.84,
      "learning_rate": 0.00019800000000000002,
      "loss": 5.6106,
      "step": 100
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.000398,
      "loss": 2.7044,
      "step": 200
    },
    {
      "epoch": 2.54,
      "learning_rate": 0.000598,
      "loss": 0.8324,
      "step": 300
    },
    {
      "epoch": 3.39,
      "learning_rate": 0.0007980000000000001,
      "loss": 0.345,
      "step": 400
    },
    {
      "epoch": 4.24,
      "learning_rate": 0.000998,
      "loss": 0.2548,
      "step": 500
    },
    {
      "epoch": 4.24,
      "eval_loss": 0.2469930201768875,
      "eval_runtime": 294.3209,
      "eval_samples_per_second": 21.959,
      "eval_steps_per_second": 0.306,
      "eval_wer": 0.36629738582545746,
      "step": 500
    },
    {
      "epoch": 5.08,
      "learning_rate": 0.0009998106236074637,
      "loss": 0.2203,
      "step": 600
    },
    {
      "epoch": 5.93,
      "learning_rate": 0.0009992349703990264,
      "loss": 0.1774,
      "step": 700
    },
    {
      "epoch": 6.78,
      "learning_rate": 0.0009982734659650546,
      "loss": 0.1643,
      "step": 800
    },
    {
      "epoch": 7.62,
      "learning_rate": 0.0009969268534379132,
      "loss": 0.1516,
      "step": 900
    },
    {
      "epoch": 8.47,
      "learning_rate": 0.00099519617359424,
      "loss": 0.1435,
      "step": 1000
    },
    {
      "epoch": 8.47,
      "eval_loss": 0.20002000033855438,
      "eval_runtime": 295.0975,
      "eval_samples_per_second": 21.901,
      "eval_steps_per_second": 0.305,
      "eval_wer": 0.2791095533162254,
      "step": 1000
    },
    {
      "epoch": 9.32,
      "learning_rate": 0.0009930827640505444,
      "loss": 0.1393,
      "step": 1100
    },
    {
      "epoch": 10.17,
      "learning_rate": 0.0009905882582293853,
      "loss": 0.1323,
      "step": 1200
    },
    {
      "epoch": 11.02,
      "learning_rate": 0.0009877145840969208,
      "loss": 0.1317,
      "step": 1300
    },
    {
      "epoch": 11.86,
      "learning_rate": 0.0009844639626728146,
      "loss": 0.1156,
      "step": 1400
    },
    {
      "epoch": 12.71,
      "learning_rate": 0.000980838906313641,
      "loss": 0.1158,
      "step": 1500
    },
    {
      "epoch": 12.71,
      "eval_loss": 0.20303700864315033,
      "eval_runtime": 301.1246,
      "eval_samples_per_second": 21.463,
      "eval_steps_per_second": 0.299,
      "eval_wer": 0.26521457929106423,
      "step": 1500
    },
    {
      "epoch": 13.56,
      "learning_rate": 0.0009768422167711244,
      "loss": 0.1174,
      "step": 1600
    },
    {
      "epoch": 14.41,
      "learning_rate": 0.0009724769830267044,
      "loss": 0.1118,
      "step": 1700
    },
    {
      "epoch": 15.25,
      "learning_rate": 0.0009677465789041117,
      "loss": 0.1115,
      "step": 1800
    },
    {
      "epoch": 16.1,
      "learning_rate": 0.0009626546604617881,
      "loss": 0.1149,
      "step": 1900
    },
    {
      "epoch": 16.95,
      "learning_rate": 0.0009572051631671743,
      "loss": 0.1094,
      "step": 2000
    },
    {
      "epoch": 16.95,
      "eval_loss": 0.20957913994789124,
      "eval_runtime": 294.9247,
      "eval_samples_per_second": 21.914,
      "eval_steps_per_second": 0.305,
      "eval_wer": 0.26045774133650457,
      "step": 2000
    },
    {
      "epoch": 17.79,
      "learning_rate": 0.0009514022988550459,
      "loss": 0.1049,
      "step": 2100
    },
    {
      "epoch": 18.64,
      "learning_rate": 0.0009452505524722506,
      "loss": 0.1031,
      "step": 2200
    },
    {
      "epoch": 19.49,
      "learning_rate": 0.0009387546786113578,
      "loss": 0.0991,
      "step": 2300
    },
    {
      "epoch": 20.34,
      "learning_rate": 0.0009319196978359077,
      "loss": 0.1015,
      "step": 2400
    },
    {
      "epoch": 21.19,
      "learning_rate": 0.0009247508928000911,
      "loss": 0.1004,
      "step": 2500
    },
    {
      "epoch": 21.19,
      "eval_loss": 0.21501973271369934,
      "eval_runtime": 295.0309,
      "eval_samples_per_second": 21.906,
      "eval_steps_per_second": 0.305,
      "eval_wer": 0.2477311134756212,
      "step": 2500
    },
    {
      "epoch": 22.03,
      "learning_rate": 0.0009172538041658665,
      "loss": 0.0949,
      "step": 2600
    },
    {
      "epoch": 22.88,
      "learning_rate": 0.0009094342263206646,
      "loss": 0.0869,
      "step": 2700
    },
    {
      "epoch": 23.73,
      "learning_rate": 0.0009012982028989929,
      "loss": 0.0971,
      "step": 2800
    },
    {
      "epoch": 24.57,
      "learning_rate": 0.000892852022111403,
      "loss": 0.0907,
      "step": 2900
    },
    {
      "epoch": 25.42,
      "learning_rate": 0.0008841022118844243,
      "loss": 0.0945,
      "step": 3000
    },
    {
      "epoch": 25.42,
      "eval_loss": 0.20718325674533844,
      "eval_runtime": 295.2378,
      "eval_samples_per_second": 21.891,
      "eval_steps_per_second": 0.305,
      "eval_wer": 0.2369030481316893,
      "step": 3000
    },
    {
      "epoch": 26.27,
      "learning_rate": 0.0008750555348152298,
      "loss": 0.0884,
      "step": 3100
    },
    {
      "epoch": 27.12,
      "learning_rate": 0.0008657189829449246,
      "loss": 0.0894,
      "step": 3200
    },
    {
      "epoch": 27.96,
      "learning_rate": 0.0008560997723545024,
      "loss": 0.0851,
      "step": 3300
    },
    {
      "epoch": 28.81,
      "learning_rate": 0.0008462053375876455,
      "loss": 0.09,
      "step": 3400
    },
    {
      "epoch": 29.66,
      "learning_rate": 0.0008360433259046764,
      "loss": 0.0844,
      "step": 3500
    },
    {
      "epoch": 29.66,
      "eval_loss": 0.1981162428855896,
      "eval_runtime": 296.5443,
      "eval_samples_per_second": 21.794,
      "eval_steps_per_second": 0.303,
      "eval_wer": 0.23275124658363064,
      "step": 3500
    },
    {
      "epoch": 30.51,
      "learning_rate": 0.0008256215913721052,
      "loss": 0.084,
      "step": 3600
    },
    {
      "epoch": 31.35,
      "learning_rate": 0.0008149481887923395,
      "loss": 0.081,
      "step": 3700
    },
    {
      "epoch": 32.2,
      "learning_rate": 0.0008040313674782489,
      "loss": 0.0812,
      "step": 3800
    },
    {
      "epoch": 33.05,
      "learning_rate": 0.0007928795648773933,
      "loss": 0.0836,
      "step": 3900
    },
    {
      "epoch": 33.89,
      "learning_rate": 0.0007815014000508474,
      "loss": 0.0877,
      "step": 4000
    },
    {
      "epoch": 33.89,
      "eval_loss": 0.20411191880702972,
      "eval_runtime": 294.5281,
      "eval_samples_per_second": 21.944,
      "eval_steps_per_second": 0.306,
      "eval_wer": 0.24249441906073313,
      "step": 4000
    },
    {
      "epoch": 34.74,
      "learning_rate": 0.0007699056670116568,
      "loss": 0.0842,
      "step": 4100
    },
    {
      "epoch": 35.59,
      "learning_rate": 0.0007581013279280784,
      "loss": 0.0802,
      "step": 4200
    },
    {
      "epoch": 36.44,
      "learning_rate": 0.0007460975061968531,
      "loss": 0.0812,
      "step": 4300
    },
    {
      "epoch": 37.29,
      "learning_rate": 0.0007339034793918703,
      "loss": 0.0754,
      "step": 4400
    },
    {
      "epoch": 38.14,
      "learning_rate": 0.00072152867209367,
      "loss": 0.0741,
      "step": 4500
    },
    {
      "epoch": 38.14,
      "eval_loss": 0.235269233584404,
      "eval_runtime": 301.8823,
      "eval_samples_per_second": 21.409,
      "eval_steps_per_second": 0.298,
      "eval_wer": 0.24213974254657736,
      "step": 4500
    },
    {
      "epoch": 38.98,
      "learning_rate": 0.0007089826486053256,
      "loss": 0.0752,
      "step": 4600
    },
    {
      "epoch": 39.83,
      "learning_rate": 0.0006962751055603378,
      "loss": 0.0725,
      "step": 4700
    },
    {
      "epoch": 40.68,
      "learning_rate": 0.0006834158644282511,
      "loss": 0.0712,
      "step": 4800
    },
    {
      "epoch": 41.52,
      "learning_rate": 0.000670414863923788,
      "loss": 0.0673,
      "step": 4900
    },
    {
      "epoch": 42.37,
      "learning_rate": 0.0006572821523253649,
      "loss": 0.0676,
      "step": 5000
    },
    {
      "epoch": 42.37,
      "eval_loss": 0.20924021303653717,
      "eval_runtime": 294.7332,
      "eval_samples_per_second": 21.928,
      "eval_steps_per_second": 0.305,
      "eval_wer": 0.22127641818447352,
      "step": 5000
    },
    {
      "epoch": 43.22,
      "learning_rate": 0.000644027879708929,
      "loss": 0.0691,
      "step": 5100
    },
    {
      "epoch": 44.07,
      "learning_rate": 0.0006306622901031152,
      "loss": 0.0621,
      "step": 5200
    },
    {
      "epoch": 44.91,
      "learning_rate": 0.000617195713571792,
      "loss": 0.0648,
      "step": 5300
    },
    {
      "epoch": 45.76,
      "learning_rate": 0.0006036385582301083,
      "loss": 0.0682,
      "step": 5400
    },
    {
      "epoch": 46.61,
      "learning_rate": 0.0005900013022002183,
      "loss": 0.0623,
      "step": 5500
    },
    {
      "epoch": 46.61,
      "eval_loss": 0.22174060344696045,
      "eval_runtime": 298.483,
      "eval_samples_per_second": 21.653,
      "eval_steps_per_second": 0.302,
      "eval_wer": 0.2250318165696522,
      "step": 5500
    },
    {
      "epoch": 47.46,
      "learning_rate": 0.0005762944855128968,
      "loss": 0.0702,
      "step": 5600
    },
    {
      "epoch": 48.3,
      "learning_rate": 0.0005625287019613087,
      "loss": 0.0597,
      "step": 5700
    },
    {
      "epoch": 49.15,
      "learning_rate": 0.0005487145909132243,
      "loss": 0.0557,
      "step": 5800
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.0005348628290880112,
      "loss": 0.0547,
      "step": 5900
    },
    {
      "epoch": 50.84,
      "learning_rate": 0.0005209841223047587,
      "loss": 0.0574,
      "step": 6000
    },
    {
      "epoch": 50.84,
      "eval_loss": 0.21524770557880402,
      "eval_runtime": 297.0196,
      "eval_samples_per_second": 21.76,
      "eval_steps_per_second": 0.303,
      "eval_wer": 0.2179382862865369,
      "step": 6000
    },
    {
      "epoch": 51.69,
      "learning_rate": 0.00050708919720791,
      "loss": 0.0571,
      "step": 6100
    },
    {
      "epoch": 52.54,
      "learning_rate": 0.0004931887929768004,
      "loss": 0.0557,
      "step": 6200
    },
    {
      "epoch": 53.39,
      "learning_rate": 0.0004792936530255048,
      "loss": 0.0513,
      "step": 6300
    },
    {
      "epoch": 54.24,
      "learning_rate": 0.00046541451669941626,
      "loss": 0.0542,
      "step": 6400
    },
    {
      "epoch": 55.08,
      "learning_rate": 0.00045156211097496503,
      "loss": 0.0583,
      "step": 6500
    },
    {
      "epoch": 55.08,
      "eval_loss": 0.22067895531654358,
      "eval_runtime": 297.4094,
      "eval_samples_per_second": 21.731,
      "eval_steps_per_second": 0.303,
      "eval_wer": 0.21860591266612422,
      "step": 6500
    },
    {
      "epoch": 55.93,
      "learning_rate": 0.00043774714216890067,
      "loss": 0.0516,
      "step": 6600
    },
    {
      "epoch": 56.78,
      "learning_rate": 0.000423980287663539,
      "loss": 0.0552,
      "step": 6700
    },
    {
      "epoch": 57.62,
      "learning_rate": 0.00041027218765437467,
      "loss": 0.0545,
      "step": 6800
    },
    {
      "epoch": 58.47,
      "learning_rate": 0.0003966334369264325,
      "loss": 0.0497,
      "step": 6900
    },
    {
      "epoch": 59.32,
      "learning_rate": 0.00038307457666571714,
      "loss": 0.0488,
      "step": 7000
    },
    {
      "epoch": 59.32,
      "eval_loss": 0.22246450185775757,
      "eval_runtime": 291.806,
      "eval_samples_per_second": 22.148,
      "eval_steps_per_second": 0.308,
      "eval_wer": 0.21593540714777493,
      "step": 7000
    },
    {
      "epoch": 60.17,
      "learning_rate": 0.000369606086312089,
      "loss": 0.0467,
      "step": 7100
    },
    {
      "epoch": 61.02,
      "learning_rate": 0.00035623837545986186,
      "loss": 0.0496,
      "step": 7200
    },
    {
      "epoch": 61.86,
      "learning_rate": 0.0003429817758123833,
      "loss": 0.0477,
      "step": 7300
    },
    {
      "epoch": 62.71,
      "learning_rate": 0.00032984653319681614,
      "loss": 0.0491,
      "step": 7400
    },
    {
      "epoch": 63.56,
      "learning_rate": 0.0003168427996452925,
      "loss": 0.0456,
      "step": 7500
    },
    {
      "epoch": 63.56,
      "eval_loss": 0.22932520508766174,
      "eval_runtime": 293.0052,
      "eval_samples_per_second": 22.058,
      "eval_steps_per_second": 0.307,
      "eval_wer": 0.20310446266508106,
      "step": 7500
    },
    {
      "epoch": 64.41,
      "learning_rate": 0.0003039806255485599,
      "loss": 0.042,
      "step": 7600
    },
    {
      "epoch": 65.25,
      "learning_rate": 0.0002912699518881855,
      "loss": 0.0431,
      "step": 7700
    },
    {
      "epoch": 66.1,
      "learning_rate": 0.00027872060255331935,
      "loss": 0.042,
      "step": 7800
    },
    {
      "epoch": 66.95,
      "learning_rate": 0.0002663422767479589,
      "loss": 0.0422,
      "step": 7900
    },
    {
      "epoch": 67.79,
      "learning_rate": 0.0002541445414945791,
      "loss": 0.041,
      "step": 8000
    },
    {
      "epoch": 67.79,
      "eval_loss": 0.22772204875946045,
      "eval_runtime": 292.1439,
      "eval_samples_per_second": 22.123,
      "eval_steps_per_second": 0.308,
      "eval_wer": 0.20128935344557802,
      "step": 8000
    },
    {
      "epoch": 68.64,
      "learning_rate": 0.000242136824239924,
      "loss": 0.042,
      "step": 8100
    },
    {
      "epoch": 69.49,
      "learning_rate": 0.00023032840556867502,
      "loss": 0.0404,
      "step": 8200
    },
    {
      "epoch": 70.34,
      "learning_rate": 0.00021872841203062537,
      "loss": 0.0413,
      "step": 8300
    },
    {
      "epoch": 71.19,
      "learning_rate": 0.0002074585300038035,
      "loss": 0.0385,
      "step": 8400
    },
    {
      "epoch": 72.03,
      "learning_rate": 0.00019629981023833903,
      "loss": 0.0379,
      "step": 8500
    },
    {
      "epoch": 72.03,
      "eval_loss": 0.22867247462272644,
      "eval_runtime": 297.2969,
      "eval_samples_per_second": 21.739,
      "eval_steps_per_second": 0.303,
      "eval_wer": 0.199077841063195,
      "step": 8500
    },
    {
      "epoch": 72.88,
      "learning_rate": 0.00018537581579734625,
      "loss": 0.037,
      "step": 8600
    },
    {
      "epoch": 73.73,
      "learning_rate": 0.00017469498967238802,
      "loss": 0.0362,
      "step": 8700
    },
    {
      "epoch": 74.57,
      "learning_rate": 0.00016426558691388015,
      "loss": 0.0348,
      "step": 8800
    },
    {
      "epoch": 75.42,
      "learning_rate": 0.0001540956682508869,
      "loss": 0.0367,
      "step": 8900
    },
    {
      "epoch": 76.27,
      "learning_rate": 0.00014419309386110528,
      "loss": 0.0381,
      "step": 9000
    },
    {
      "epoch": 76.27,
      "eval_loss": 0.22330239415168762,
      "eval_runtime": 290.8488,
      "eval_samples_per_second": 22.221,
      "eval_steps_per_second": 0.309,
      "eval_wer": 0.19536416932674053,
      "step": 9000
    },
    {
      "epoch": 77.12,
      "learning_rate": 0.00013456551729585032,
      "loss": 0.0343,
      "step": 9100
    },
    {
      "epoch": 77.96,
      "learning_rate": 0.00012522037956474042,
      "loss": 0.033,
      "step": 9200
    },
    {
      "epoch": 78.81,
      "learning_rate": 0.00011616490338465268,
      "loss": 0.032,
      "step": 9300
    },
    {
      "epoch": 79.66,
      "learning_rate": 0.00010740608759739317,
      "loss": 0.031,
      "step": 9400
    },
    {
      "epoch": 80.51,
      "learning_rate": 9.895070176039717e-05,
      "loss": 0.0308,
      "step": 9500
    },
    {
      "epoch": 80.51,
      "eval_loss": 0.21947245299816132,
      "eval_runtime": 295.835,
      "eval_samples_per_second": 21.847,
      "eval_steps_per_second": 0.304,
      "eval_wer": 0.18349293776470343,
      "step": 9500
    },
    {
      "epoch": 81.35,
      "learning_rate": 9.080528091463946e-05,
      "loss": 0.0309,
      "step": 9600
    },
    {
      "epoch": 82.2,
      "learning_rate": 8.297612053379883e-05,
      "loss": 0.0302,
      "step": 9700
    },
    {
      "epoch": 83.05,
      "learning_rate": 7.546927165858153e-05,
      "loss": 0.0296,
      "step": 9800
    },
    {
      "epoch": 83.89,
      "learning_rate": 6.82905362199625e-05,
      "loss": 0.0289,
      "step": 9900
    },
    {
      "epoch": 84.74,
      "learning_rate": 6.14454625549593e-05,
      "loss": 0.0291,
      "step": 10000
    },
    {
      "epoch": 84.74,
      "eval_loss": 0.22659379243850708,
      "eval_runtime": 301.2419,
      "eval_samples_per_second": 21.455,
      "eval_steps_per_second": 0.299,
      "eval_wer": 0.18251236151968456,
      "step": 10000
    },
    {
      "epoch": 85.59,
      "learning_rate": 5.493934111840604e-05,
      "loss": 0.029,
      "step": 10100
    },
    {
      "epoch": 86.44,
      "learning_rate": 4.87772003940401e-05,
      "loss": 0.0276,
      "step": 10200
    },
    {
      "epoch": 87.29,
      "learning_rate": 4.296380300806219e-05,
      "loss": 0.0285,
      "step": 10300
    },
    {
      "epoch": 88.14,
      "learning_rate": 3.750364204817463e-05,
      "loss": 0.0287,
      "step": 10400
    },
    {
      "epoch": 88.98,
      "learning_rate": 3.245018204574429e-05,
      "loss": 0.0266,
      "step": 10500
    },
    {
      "epoch": 88.98,
      "eval_loss": 0.22852544486522675,
      "eval_runtime": 296.9937,
      "eval_samples_per_second": 21.761,
      "eval_steps_per_second": 0.303,
      "eval_wer": 0.1800713525693184,
      "step": 10500
    },
    {
      "epoch": 89.83,
      "learning_rate": 2.7705245284789292e-05,
      "loss": 0.0269,
      "step": 10600
    },
    {
      "epoch": 90.68,
      "learning_rate": 2.3325338060476542e-05,
      "loss": 0.0262,
      "step": 10700
    },
    {
      "epoch": 91.52,
      "learning_rate": 1.9313845537448972e-05,
      "loss": 0.0257,
      "step": 10800
    },
    {
      "epoch": 92.37,
      "learning_rate": 1.567386813814875e-05,
      "loss": 0.0271,
      "step": 10900
    },
    {
      "epoch": 93.22,
      "learning_rate": 1.2408219146547828e-05,
      "loss": 0.0266,
      "step": 11000
    },
    {
      "epoch": 93.22,
      "eval_loss": 0.229219451546669,
      "eval_runtime": 299.2804,
      "eval_samples_per_second": 21.595,
      "eval_steps_per_second": 0.301,
      "eval_wer": 0.1801130792180426,
      "step": 11000
    },
    {
      "epoch": 94.07,
      "learning_rate": 9.519422533802668e-06,
      "loss": 0.027,
      "step": 11100
    },
    {
      "epoch": 94.91,
      "learning_rate": 7.009711007514796e-06,
      "loss": 0.0266,
      "step": 11200
    },
    {
      "epoch": 95.76,
      "learning_rate": 4.8810242861045405e-06,
      "loss": 0.0258,
      "step": 11300
    },
    {
      "epoch": 96.61,
      "learning_rate": 3.1350075996317563e-06,
      "loss": 0.0256,
      "step": 11400
    },
    {
      "epoch": 97.46,
      "learning_rate": 1.773010418222143e-06,
      "loss": 0.0262,
      "step": 11500
    },
    {
      "epoch": 97.46,
      "eval_loss": 0.22784815728664398,
      "eval_runtime": 323.2034,
      "eval_samples_per_second": 19.997,
      "eval_steps_per_second": 0.278,
      "eval_wer": 0.17881955310759218,
      "step": 11500
    },
    {
      "epoch": 98.3,
      "learning_rate": 7.96085409082159e-07,
      "loss": 0.0267,
      "step": 11600
    },
    {
      "epoch": 99.15,
      "learning_rate": 2.049876229081038e-07,
      "loss": 0.026,
      "step": 11700
    },
    {
      "epoch": 100.0,
      "learning_rate": 1.7391031886804242e-10,
      "loss": 0.0257,
      "step": 11800
    },
    {
      "epoch": 100.0,
      "step": 11800,
      "total_flos": 2.7805901871717807e+20,
      "train_loss": 0.14547648023750823,
      "train_runtime": 96097.4367,
      "train_samples_per_second": 17.722,
      "train_steps_per_second": 0.123
    }
  ],
  "max_steps": 11800,
  "num_train_epochs": 100,
  "total_flos": 2.7805901871717807e+20,
  "trial_name": null,
  "trial_params": null
}