|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.118110236220472,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000499599358974359, |
|
"loss": 3.1389, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000499198717948718, |
|
"loss": 2.8569, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0004987980769230769, |
|
"loss": 2.75, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0004983974358974359, |
|
"loss": 2.7375, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0004979967948717949, |
|
"loss": 2.7342, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004975961538461539, |
|
"loss": 2.753, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004971955128205128, |
|
"loss": 2.7124, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0004967948717948718, |
|
"loss": 2.7038, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0004963942307692308, |
|
"loss": 2.6142, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0004959935897435898, |
|
"loss": 2.6427, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0004955929487179487, |
|
"loss": 2.6265, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0004951923076923077, |
|
"loss": 2.6352, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0004947916666666667, |
|
"loss": 2.616, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0004943910256410257, |
|
"loss": 2.6937, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0004939903846153846, |
|
"loss": 2.6719, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0004935897435897436, |
|
"loss": 2.6043, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0004931891025641026, |
|
"loss": 2.565, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0004927884615384616, |
|
"loss": 2.5776, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0004923878205128205, |
|
"loss": 2.6326, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0004919871794871795, |
|
"loss": 2.5868, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0004915865384615384, |
|
"loss": 2.5499, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0004911858974358974, |
|
"loss": 2.5823, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0004907852564102564, |
|
"loss": 2.5858, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0004903846153846154, |
|
"loss": 2.5688, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0004899839743589743, |
|
"loss": 2.57, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0004895833333333333, |
|
"loss": 2.5569, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0004891826923076923, |
|
"loss": 2.5067, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0004887820512820513, |
|
"loss": 2.4936, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004883814102564103, |
|
"loss": 2.5958, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00048798076923076925, |
|
"loss": 2.5213, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004875801282051282, |
|
"loss": 2.6369, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004871794871794872, |
|
"loss": 2.578, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00048677884615384615, |
|
"loss": 2.6214, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00048637820512820515, |
|
"loss": 2.6193, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0004859775641025641, |
|
"loss": 2.5948, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0004855769230769231, |
|
"loss": 2.5615, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00048517628205128205, |
|
"loss": 2.5246, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00048477564102564105, |
|
"loss": 2.4585, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.000484375, |
|
"loss": 2.5621, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.000483974358974359, |
|
"loss": 2.571, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00048357371794871795, |
|
"loss": 2.5382, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00048317307692307695, |
|
"loss": 2.5028, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0004827724358974359, |
|
"loss": 2.4812, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0004823717948717949, |
|
"loss": 2.5161, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00048197115384615384, |
|
"loss": 2.4557, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.00048157051282051285, |
|
"loss": 2.4617, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0004811698717948718, |
|
"loss": 2.5267, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0004807692307692308, |
|
"loss": 2.4555, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00048036858974358974, |
|
"loss": 2.37, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00047996794871794875, |
|
"loss": 2.4829, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0004795673076923077, |
|
"loss": 2.5022, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0004791666666666667, |
|
"loss": 2.4958, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.0004787660256410257, |
|
"loss": 2.4358, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00047836538461538464, |
|
"loss": 2.4742, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00047796474358974365, |
|
"loss": 2.4158, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.0004775641025641026, |
|
"loss": 2.5216, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.0004771634615384616, |
|
"loss": 2.5239, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.00047676282051282054, |
|
"loss": 2.3946, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.0004763621794871795, |
|
"loss": 2.4457, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.00047596153846153844, |
|
"loss": 2.4379, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.00047556089743589744, |
|
"loss": 2.4812, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.0004751602564102564, |
|
"loss": 2.4153, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0004747596153846154, |
|
"loss": 2.4115, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.00047435897435897434, |
|
"loss": 2.3637, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00047395833333333334, |
|
"loss": 2.3741, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.0004735576923076923, |
|
"loss": 2.4461, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.0004731570512820513, |
|
"loss": 2.4694, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.00047275641025641024, |
|
"loss": 2.5177, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.00047235576923076924, |
|
"loss": 2.4964, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0004719551282051282, |
|
"loss": 2.5114, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.0004715544871794872, |
|
"loss": 2.4447, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00047115384615384613, |
|
"loss": 2.4112, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.00047075320512820514, |
|
"loss": 2.4585, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.0004703525641025641, |
|
"loss": 2.4696, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0004699519230769231, |
|
"loss": 2.3813, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00046955128205128203, |
|
"loss": 2.4962, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.00046915064102564104, |
|
"loss": 2.4392, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.00046875, |
|
"loss": 2.3436, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.000468349358974359, |
|
"loss": 2.4758, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.000467948717948718, |
|
"loss": 2.4026, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.00046754807692307693, |
|
"loss": 2.3728, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.00046714743589743594, |
|
"loss": 2.395, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.0004667467948717949, |
|
"loss": 2.2767, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.0004663461538461539, |
|
"loss": 2.3483, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.00046594551282051283, |
|
"loss": 2.2911, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.00046554487179487183, |
|
"loss": 2.3502, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.0004651442307692308, |
|
"loss": 2.3532, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.0004647435897435898, |
|
"loss": 2.266, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.00046434294871794873, |
|
"loss": 2.3046, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.00046394230769230773, |
|
"loss": 2.2458, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.0004635416666666667, |
|
"loss": 2.3324, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.0004631410256410257, |
|
"loss": 2.3016, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.00046274038461538463, |
|
"loss": 2.379, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.00046233974358974363, |
|
"loss": 2.3075, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 0.0004619391025641026, |
|
"loss": 2.3942, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.0004615384615384616, |
|
"loss": 2.2891, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 0.00046113782051282053, |
|
"loss": 2.4199, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 0.0004607371794871795, |
|
"loss": 2.3662, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 0.0004603365384615384, |
|
"loss": 2.3346, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 0.0004599358974358974, |
|
"loss": 2.2607, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 0.0004595352564102564, |
|
"loss": 2.3446, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 0.0004591346153846154, |
|
"loss": 2.3649, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 0.0004587339743589743, |
|
"loss": 2.2394, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 0.0004583333333333333, |
|
"loss": 2.3278, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 0.0004579326923076923, |
|
"loss": 2.2725, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 0.0004575320512820513, |
|
"loss": 2.3328, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 0.0004571314102564103, |
|
"loss": 2.296, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 0.0004567307692307692, |
|
"loss": 2.2562, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 0.0004563301282051282, |
|
"loss": 2.3062, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 0.0004559294871794872, |
|
"loss": 2.3125, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 0.0004555288461538462, |
|
"loss": 2.3257, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 0.0004551282051282051, |
|
"loss": 2.3431, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 0.0004547275641025641, |
|
"loss": 2.3507, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 0.00045432692307692307, |
|
"loss": 2.3621, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 0.0004539262820512821, |
|
"loss": 2.2937, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 0.000453525641025641, |
|
"loss": 2.3356, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 0.000453125, |
|
"loss": 2.2228, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 0.00045272435897435897, |
|
"loss": 2.3189, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 0.00045232371794871797, |
|
"loss": 2.3605, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 0.0004519230769230769, |
|
"loss": 2.1857, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 0.0004515224358974359, |
|
"loss": 2.1538, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 0.00045112179487179487, |
|
"loss": 2.2092, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 0.00045072115384615387, |
|
"loss": 2.2089, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 0.0004503205128205128, |
|
"loss": 2.1746, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 0.0004499198717948718, |
|
"loss": 2.1455, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 0.00044951923076923077, |
|
"loss": 2.119, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 0.00044911858974358977, |
|
"loss": 2.2341, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 0.0004487179487179487, |
|
"loss": 2.2275, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 0.0004483173076923077, |
|
"loss": 2.176, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.0004479166666666667, |
|
"loss": 2.1187, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.00044751602564102567, |
|
"loss": 2.1175, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.00044711538461538467, |
|
"loss": 2.1818, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.0004467147435897436, |
|
"loss": 2.1193, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.0004463141025641026, |
|
"loss": 2.2079, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.00044591346153846157, |
|
"loss": 2.1361, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.00044551282051282057, |
|
"loss": 2.2246, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.00044511217948717946, |
|
"loss": 2.1139, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 0.00044471153846153846, |
|
"loss": 2.1058, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.0004443108974358974, |
|
"loss": 2.1442, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 0.0004439102564102564, |
|
"loss": 2.1039, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.00044350961538461536, |
|
"loss": 2.1884, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 0.00044310897435897436, |
|
"loss": 2.21, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 0.0004427083333333333, |
|
"loss": 2.1139, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 0.0004423076923076923, |
|
"loss": 2.1049, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 0.00044190705128205126, |
|
"loss": 2.1282, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 0.00044150641025641026, |
|
"loss": 2.1803, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 0.0004411057692307692, |
|
"loss": 2.1406, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 0.0004407051282051282, |
|
"loss": 2.1841, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 0.00044030448717948716, |
|
"loss": 2.1961, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.00043990384615384616, |
|
"loss": 2.1265, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 0.0004395032051282051, |
|
"loss": 2.1708, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.0004391025641025641, |
|
"loss": 2.1555, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 0.00043870192307692306, |
|
"loss": 2.1463, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.00043830128205128206, |
|
"loss": 2.1947, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 0.000437900641025641, |
|
"loss": 2.1962, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.0004375, |
|
"loss": 2.2109, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 0.000437099358974359, |
|
"loss": 2.1556, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 0.00043669871794871796, |
|
"loss": 2.1605, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 0.00043629807692307696, |
|
"loss": 2.155, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 0.0004358974358974359, |
|
"loss": 2.0347, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 0.0004354967948717949, |
|
"loss": 1.9321, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 0.00043509615384615386, |
|
"loss": 2.0034, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 0.00043469551282051286, |
|
"loss": 2.0203, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 0.0004342948717948718, |
|
"loss": 1.9189, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 0.0004338942307692308, |
|
"loss": 1.9868, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 0.00043349358974358976, |
|
"loss": 1.9343, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 0.00043309294871794876, |
|
"loss": 1.8894, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 0.0004326923076923077, |
|
"loss": 1.9735, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 0.0004322916666666667, |
|
"loss": 2.0012, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 0.00043189102564102565, |
|
"loss": 1.9087, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 0.00043149038461538466, |
|
"loss": 1.9213, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 0.0004310897435897436, |
|
"loss": 2.0195, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 0.0004306891025641026, |
|
"loss": 1.9806, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 0.00043028846153846155, |
|
"loss": 1.9731, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 0.00042988782051282056, |
|
"loss": 1.9282, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 0.00042948717948717945, |
|
"loss": 1.9433, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 0.00042908653846153845, |
|
"loss": 1.9633, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 0.0004286858974358974, |
|
"loss": 1.9918, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 0.0004282852564102564, |
|
"loss": 1.9265, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 0.00042788461538461535, |
|
"loss": 1.9198, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 0.00042748397435897435, |
|
"loss": 2.0159, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 0.0004270833333333333, |
|
"loss": 1.9474, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 0.0004266826923076923, |
|
"loss": 2.0152, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 0.0004262820512820513, |
|
"loss": 1.9006, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 0.00042588141025641025, |
|
"loss": 1.9862, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 0.00042548076923076925, |
|
"loss": 1.9945, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 0.0004250801282051282, |
|
"loss": 1.985, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 0.0004246794871794872, |
|
"loss": 1.9899, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 0.00042427884615384615, |
|
"loss": 2.0149, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 0.00042387820512820515, |
|
"loss": 1.9768, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 0.0004234775641025641, |
|
"loss": 2.0204, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 0.0004230769230769231, |
|
"loss": 1.9343, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 0.00042267628205128205, |
|
"loss": 2.0574, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 0.00042227564102564105, |
|
"loss": 2.0245, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 0.000421875, |
|
"loss": 2.0356, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 0.000421474358974359, |
|
"loss": 1.9984, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 0.00042107371794871794, |
|
"loss": 1.9826, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 0.00042067307692307695, |
|
"loss": 1.9846, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 0.0004202724358974359, |
|
"loss": 1.8931, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 0.0004198717948717949, |
|
"loss": 1.7642, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 0.00041947115384615384, |
|
"loss": 1.7235, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 0.00041907051282051285, |
|
"loss": 1.7058, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 0.0004186698717948718, |
|
"loss": 1.8022, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.0004182692307692308, |
|
"loss": 1.7819, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 0.00041786858974358974, |
|
"loss": 1.7047, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 0.00041746794871794874, |
|
"loss": 1.773, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.0004170673076923077, |
|
"loss": 1.7324, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 0.0004166666666666667, |
|
"loss": 1.7862, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 0.0004162660256410257, |
|
"loss": 1.764, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 0.00041586538461538464, |
|
"loss": 1.7156, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 0.00041546474358974364, |
|
"loss": 1.7948, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 0.0004150641025641026, |
|
"loss": 1.7608, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 0.0004146634615384616, |
|
"loss": 1.7204, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.00041426282051282054, |
|
"loss": 1.7691, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 0.0004138621794871795, |
|
"loss": 1.7969, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 0.00041346153846153844, |
|
"loss": 1.7194, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 0.00041306089743589744, |
|
"loss": 1.7022, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 0.0004126602564102564, |
|
"loss": 1.8287, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 0.0004122596153846154, |
|
"loss": 1.7074, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 0.00041185897435897434, |
|
"loss": 1.8354, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 0.00041145833333333334, |
|
"loss": 1.791, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 0.0004110576923076923, |
|
"loss": 1.7633, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 0.0004106570512820513, |
|
"loss": 1.811, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 0.00041025641025641023, |
|
"loss": 1.7498, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 0.00040985576923076924, |
|
"loss": 1.7662, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 0.0004094551282051282, |
|
"loss": 1.7686, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 0.0004090544871794872, |
|
"loss": 1.8357, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.00040865384615384613, |
|
"loss": 1.7981, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 0.00040825320512820513, |
|
"loss": 1.7502, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 0.0004078525641025641, |
|
"loss": 1.7666, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 0.0004074519230769231, |
|
"loss": 1.798, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 0.00040705128205128203, |
|
"loss": 1.8072, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 0.00040665064102564103, |
|
"loss": 1.7611, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 0.00040625000000000004, |
|
"loss": 1.8126, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 0.000405849358974359, |
|
"loss": 1.8361, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 0.000405448717948718, |
|
"loss": 1.7938, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 0.00040504807692307693, |
|
"loss": 1.7524, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 0.00040464743589743593, |
|
"loss": 1.7601, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 0.0004042467948717949, |
|
"loss": 1.5373, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 0.0004038461538461539, |
|
"loss": 1.5108, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.00040344551282051283, |
|
"loss": 1.568, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 0.00040304487179487183, |
|
"loss": 1.5175, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 0.0004026442307692308, |
|
"loss": 1.5748, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 0.0004022435897435898, |
|
"loss": 1.4772, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 0.00040184294871794873, |
|
"loss": 1.5035, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 0.00040144230769230773, |
|
"loss": 1.5318, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 0.0004010416666666667, |
|
"loss": 1.5139, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 0.0004006410256410257, |
|
"loss": 1.5599, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 0.00040024038461538463, |
|
"loss": 1.5475, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 0.00039983974358974363, |
|
"loss": 1.4819, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 0.0003994391025641026, |
|
"loss": 1.5419, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 0.0003990384615384616, |
|
"loss": 1.6027, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 0.00039863782051282053, |
|
"loss": 1.5683, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 0.0003982371794871795, |
|
"loss": 1.5963, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 0.0003978365384615384, |
|
"loss": 1.5874, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 0.0003974358974358974, |
|
"loss": 1.5161, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 0.00039703525641025637, |
|
"loss": 1.57, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 0.0003966346153846154, |
|
"loss": 1.534, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 0.0003962339743589743, |
|
"loss": 1.5513, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 0.0003958333333333333, |
|
"loss": 1.5454, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 0.0003954326923076923, |
|
"loss": 1.5695, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 0.0003950320512820513, |
|
"loss": 1.5777, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 0.0003946314102564103, |
|
"loss": 1.5629, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 0.0003942307692307692, |
|
"loss": 1.5031, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 0.0003938301282051282, |
|
"loss": 1.5964, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 0.00039342948717948717, |
|
"loss": 1.5472, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 0.0003930288461538462, |
|
"loss": 1.5654, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 0.0003926282051282051, |
|
"loss": 1.5932, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 0.0003922275641025641, |
|
"loss": 1.6037, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 0.00039182692307692307, |
|
"loss": 1.6394, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 0.00039142628205128207, |
|
"loss": 1.5745, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 0.000391025641025641, |
|
"loss": 1.6512, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 0.000390625, |
|
"loss": 1.6107, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 0.00039022435897435897, |
|
"loss": 1.5798, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 0.00038982371794871797, |
|
"loss": 1.6087, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 0.0003894230769230769, |
|
"loss": 1.5362, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 0.0003890224358974359, |
|
"loss": 1.5899, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 0.00038862179487179487, |
|
"loss": 1.59, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 0.00038822115384615387, |
|
"loss": 1.3327, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 7.06, |
|
"learning_rate": 0.0003878205128205128, |
|
"loss": 1.3214, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 0.0003874198717948718, |
|
"loss": 1.2891, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 0.00038701923076923077, |
|
"loss": 1.3321, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 0.00038661858974358977, |
|
"loss": 1.284, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 0.0003862179487179487, |
|
"loss": 1.3346, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 0.0003858173076923077, |
|
"loss": 1.2874, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 0.0003854166666666667, |
|
"loss": 1.3104, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 0.00038501602564102567, |
|
"loss": 1.3138, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 0.00038461538461538467, |
|
"loss": 1.2871, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 0.0003842147435897436, |
|
"loss": 1.2885, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 0.0003838141025641026, |
|
"loss": 1.338, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 0.00038341346153846157, |
|
"loss": 1.3189, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 0.00038301282051282057, |
|
"loss": 1.3013, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 0.00038261217948717946, |
|
"loss": 1.3505, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 0.00038221153846153846, |
|
"loss": 1.3469, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 0.0003818108974358974, |
|
"loss": 1.3156, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 0.0003814102564102564, |
|
"loss": 1.3796, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.00038100961538461536, |
|
"loss": 1.3649, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 0.00038060897435897436, |
|
"loss": 1.3483, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 0.0003802083333333333, |
|
"loss": 1.3481, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 0.0003798076923076923, |
|
"loss": 1.334, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 0.00037940705128205126, |
|
"loss": 1.3495, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 7.61, |
|
"learning_rate": 0.00037900641025641026, |
|
"loss": 1.4102, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 7.63, |
|
"learning_rate": 0.0003786057692307692, |
|
"loss": 1.3794, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"learning_rate": 0.0003782051282051282, |
|
"loss": 1.3802, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 7.69, |
|
"learning_rate": 0.00037780448717948716, |
|
"loss": 1.3835, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 7.71, |
|
"learning_rate": 0.00037740384615384616, |
|
"loss": 1.4197, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 7.74, |
|
"learning_rate": 0.0003770032051282051, |
|
"loss": 1.3947, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"learning_rate": 0.0003766025641025641, |
|
"loss": 1.3891, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"learning_rate": 0.00037620192307692306, |
|
"loss": 1.3904, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"learning_rate": 0.00037580128205128206, |
|
"loss": 1.3937, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"learning_rate": 0.000375400641025641, |
|
"loss": 1.4258, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 0.000375, |
|
"loss": 1.3816, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 7.89, |
|
"learning_rate": 0.000374599358974359, |
|
"loss": 1.4251, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 0.00037419871794871796, |
|
"loss": 1.3946, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"learning_rate": 0.00037379807692307696, |
|
"loss": 1.3968, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"learning_rate": 0.0003733974358974359, |
|
"loss": 1.4108, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 7.99, |
|
"learning_rate": 0.0003729967948717949, |
|
"loss": 1.4673, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 0.00037259615384615386, |
|
"loss": 1.2339, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 0.00037219551282051286, |
|
"loss": 1.1775, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 8.06, |
|
"learning_rate": 0.0003717948717948718, |
|
"loss": 1.1061, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 0.0003713942307692308, |
|
"loss": 1.0912, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 8.11, |
|
"learning_rate": 0.00037099358974358975, |
|
"loss": 1.1214, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 0.00037059294871794876, |
|
"loss": 1.1537, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"learning_rate": 0.0003701923076923077, |
|
"loss": 1.116, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 8.19, |
|
"learning_rate": 0.0003697916666666667, |
|
"loss": 1.1292, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"learning_rate": 0.00036939102564102565, |
|
"loss": 1.0948, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"learning_rate": 0.00036899038461538466, |
|
"loss": 1.0968, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"learning_rate": 0.0003685897435897436, |
|
"loss": 1.118, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"learning_rate": 0.0003681891025641026, |
|
"loss": 1.1488, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 8.31, |
|
"learning_rate": 0.00036778846153846155, |
|
"loss": 1.137, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"learning_rate": 0.00036738782051282055, |
|
"loss": 1.0832, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 8.37, |
|
"learning_rate": 0.00036698717948717945, |
|
"loss": 1.1609, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 8.39, |
|
"learning_rate": 0.00036658653846153845, |
|
"loss": 1.1497, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"learning_rate": 0.0003661858974358974, |
|
"loss": 1.1289, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 8.44, |
|
"learning_rate": 0.0003657852564102564, |
|
"loss": 1.1938, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"learning_rate": 0.00036538461538461535, |
|
"loss": 1.1441, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"learning_rate": 0.00036498397435897435, |
|
"loss": 1.1233, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"learning_rate": 0.0003645833333333333, |
|
"loss": 1.1273, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 8.54, |
|
"learning_rate": 0.0003641826923076923, |
|
"loss": 1.1498, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"learning_rate": 0.0003637820512820513, |
|
"loss": 1.1848, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 8.59, |
|
"learning_rate": 0.00036338141025641025, |
|
"loss": 1.1991, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 8.62, |
|
"learning_rate": 0.00036298076923076925, |
|
"loss": 1.1705, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"learning_rate": 0.0003625801282051282, |
|
"loss": 1.1613, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 0.0003621794871794872, |
|
"loss": 1.2599, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 0.00036177884615384615, |
|
"loss": 1.1824, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 8.72, |
|
"learning_rate": 0.00036137820512820515, |
|
"loss": 1.1763, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 8.74, |
|
"learning_rate": 0.0003609775641025641, |
|
"loss": 1.1882, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"learning_rate": 0.0003605769230769231, |
|
"loss": 1.2043, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"learning_rate": 0.00036017628205128204, |
|
"loss": 1.2123, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 0.00035977564102564105, |
|
"loss": 1.1831, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"learning_rate": 0.000359375, |
|
"loss": 1.2265, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 8.87, |
|
"learning_rate": 0.000358974358974359, |
|
"loss": 1.1658, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 8.89, |
|
"learning_rate": 0.00035857371794871794, |
|
"loss": 1.1787, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"learning_rate": 0.00035817307692307695, |
|
"loss": 1.1904, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"learning_rate": 0.0003577724358974359, |
|
"loss": 1.2753, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 0.0003573717948717949, |
|
"loss": 1.2076, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 0.00035697115384615384, |
|
"loss": 1.2433, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 9.02, |
|
"learning_rate": 0.00035657051282051284, |
|
"loss": 0.9733, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 0.0003561698717948718, |
|
"loss": 0.9765, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 0.0003557692307692308, |
|
"loss": 0.9626, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 0.00035536858974358974, |
|
"loss": 0.9382, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"learning_rate": 0.00035496794871794874, |
|
"loss": 0.8644, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 9.15, |
|
"learning_rate": 0.0003545673076923077, |
|
"loss": 0.9299, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 9.17, |
|
"learning_rate": 0.0003541666666666667, |
|
"loss": 0.9618, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 0.0003537660256410257, |
|
"loss": 0.9417, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 0.00035336538461538464, |
|
"loss": 0.9873, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 0.00035296474358974364, |
|
"loss": 0.9135, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"learning_rate": 0.0003525641025641026, |
|
"loss": 0.9652, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 0.0003521634615384616, |
|
"loss": 0.9347, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 9.32, |
|
"learning_rate": 0.00035176282051282054, |
|
"loss": 0.914, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 0.0003513621794871795, |
|
"loss": 0.9622, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 9.37, |
|
"learning_rate": 0.00035096153846153844, |
|
"loss": 0.9652, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 0.00035056089743589744, |
|
"loss": 0.8865, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 9.42, |
|
"learning_rate": 0.0003501602564102564, |
|
"loss": 0.9997, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 0.0003497596153846154, |
|
"loss": 0.9695, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"learning_rate": 0.00034935897435897433, |
|
"loss": 1.0172, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 0.00034895833333333334, |
|
"loss": 0.9955, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 9.52, |
|
"learning_rate": 0.0003485576923076923, |
|
"loss": 0.9902, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 0.0003481570512820513, |
|
"loss": 1.0225, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 0.00034775641025641023, |
|
"loss": 0.9773, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 0.00034735576923076923, |
|
"loss": 0.9947, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"learning_rate": 0.0003469551282051282, |
|
"loss": 1.0234, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 0.0003465544871794872, |
|
"loss": 0.9692, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"learning_rate": 0.00034615384615384613, |
|
"loss": 0.9782, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 0.00034575320512820513, |
|
"loss": 1.0416, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"learning_rate": 0.0003453525641025641, |
|
"loss": 1.0008, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 0.0003449519230769231, |
|
"loss": 1.0854, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 9.78, |
|
"learning_rate": 0.00034455128205128203, |
|
"loss": 1.085, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 0.00034415064102564103, |
|
"loss": 1.007, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 9.83, |
|
"learning_rate": 0.00034375, |
|
"loss": 1.0516, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"learning_rate": 0.000343349358974359, |
|
"loss": 0.9729, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 0.000342948717948718, |
|
"loss": 1.0174, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 0.00034254807692307693, |
|
"loss": 1.0392, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"learning_rate": 0.00034214743589743593, |
|
"loss": 1.0534, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"learning_rate": 0.0003417467948717949, |
|
"loss": 1.0584, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"learning_rate": 0.0003413461538461539, |
|
"loss": 1.0727, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 0.00034094551282051283, |
|
"loss": 1.0023, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 10.03, |
|
"learning_rate": 0.00034054487179487183, |
|
"loss": 0.8029, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 10.05, |
|
"learning_rate": 0.0003401442307692308, |
|
"loss": 0.7901, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 10.08, |
|
"learning_rate": 0.0003397435897435898, |
|
"loss": 0.7791, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"learning_rate": 0.00033934294871794873, |
|
"loss": 0.7635, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 10.13, |
|
"learning_rate": 0.00033894230769230773, |
|
"loss": 0.7713, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 10.15, |
|
"learning_rate": 0.0003385416666666667, |
|
"loss": 0.827, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 10.18, |
|
"learning_rate": 0.0003381410256410257, |
|
"loss": 0.7844, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 10.2, |
|
"learning_rate": 0.00033774038461538463, |
|
"loss": 0.7839, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 10.23, |
|
"learning_rate": 0.00033733974358974363, |
|
"loss": 0.7702, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 10.26, |
|
"learning_rate": 0.0003369391025641026, |
|
"loss": 0.7855, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 10.28, |
|
"learning_rate": 0.0003365384615384616, |
|
"loss": 0.7669, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"learning_rate": 0.0003361378205128205, |
|
"loss": 0.8054, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 10.33, |
|
"learning_rate": 0.0003357371794871795, |
|
"loss": 0.7792, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 10.36, |
|
"learning_rate": 0.0003353365384615384, |
|
"loss": 0.7986, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 10.38, |
|
"learning_rate": 0.0003349358974358974, |
|
"loss": 0.8269, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 10.41, |
|
"learning_rate": 0.00033453525641025637, |
|
"loss": 0.7923, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 10.43, |
|
"learning_rate": 0.00033413461538461537, |
|
"loss": 0.8242, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 10.46, |
|
"learning_rate": 0.0003337339743589743, |
|
"loss": 0.8496, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 10.48, |
|
"learning_rate": 0.0003333333333333333, |
|
"loss": 0.8236, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 10.51, |
|
"learning_rate": 0.0003329326923076923, |
|
"loss": 0.8685, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 10.53, |
|
"learning_rate": 0.00033253205128205127, |
|
"loss": 0.8542, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 10.56, |
|
"learning_rate": 0.0003321314102564103, |
|
"loss": 0.8185, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 10.58, |
|
"learning_rate": 0.0003317307692307692, |
|
"loss": 0.8477, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 10.61, |
|
"learning_rate": 0.0003313301282051282, |
|
"loss": 0.8372, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 10.63, |
|
"learning_rate": 0.00033092948717948717, |
|
"loss": 0.8731, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 10.66, |
|
"learning_rate": 0.00033052884615384617, |
|
"loss": 0.8322, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 10.68, |
|
"learning_rate": 0.0003301282051282051, |
|
"loss": 0.8514, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 10.71, |
|
"learning_rate": 0.0003297275641025641, |
|
"loss": 0.8564, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"learning_rate": 0.00032932692307692307, |
|
"loss": 0.8278, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 10.76, |
|
"learning_rate": 0.00032892628205128207, |
|
"loss": 0.8408, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 10.78, |
|
"learning_rate": 0.000328525641025641, |
|
"loss": 0.8438, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 10.81, |
|
"learning_rate": 0.000328125, |
|
"loss": 0.8862, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 10.83, |
|
"learning_rate": 0.00032772435897435897, |
|
"loss": 0.8631, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 10.86, |
|
"learning_rate": 0.00032732371794871797, |
|
"loss": 0.883, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 10.89, |
|
"learning_rate": 0.0003269230769230769, |
|
"loss": 0.8644, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"learning_rate": 0.0003265224358974359, |
|
"loss": 0.8907, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 10.94, |
|
"learning_rate": 0.00032612179487179487, |
|
"loss": 0.9141, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 10.96, |
|
"learning_rate": 0.00032572115384615387, |
|
"loss": 0.8877, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 10.99, |
|
"learning_rate": 0.0003253205128205128, |
|
"loss": 0.8521, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 11.01, |
|
"learning_rate": 0.0003249198717948718, |
|
"loss": 0.7842, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 11.04, |
|
"learning_rate": 0.00032451923076923077, |
|
"loss": 0.6516, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 11.06, |
|
"learning_rate": 0.00032411858974358977, |
|
"loss": 0.6549, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 11.09, |
|
"learning_rate": 0.0003237179487179487, |
|
"loss": 0.6736, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 11.11, |
|
"learning_rate": 0.0003233173076923077, |
|
"loss": 0.6625, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 11.14, |
|
"learning_rate": 0.0003229166666666667, |
|
"loss": 0.6466, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 11.16, |
|
"learning_rate": 0.00032251602564102567, |
|
"loss": 0.6362, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 11.19, |
|
"learning_rate": 0.00032211538461538467, |
|
"loss": 0.674, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 11.21, |
|
"learning_rate": 0.0003217147435897436, |
|
"loss": 0.6509, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 11.24, |
|
"learning_rate": 0.0003213141025641026, |
|
"loss": 0.618, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 11.26, |
|
"learning_rate": 0.00032091346153846156, |
|
"loss": 0.6654, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 11.29, |
|
"learning_rate": 0.00032051282051282057, |
|
"loss": 0.6632, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 11.31, |
|
"learning_rate": 0.00032011217948717946, |
|
"loss": 0.6599, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 11.34, |
|
"learning_rate": 0.00031971153846153846, |
|
"loss": 0.6537, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 11.36, |
|
"learning_rate": 0.0003193108974358974, |
|
"loss": 0.678, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 11.39, |
|
"learning_rate": 0.0003189102564102564, |
|
"loss": 0.684, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 11.41, |
|
"learning_rate": 0.00031850961538461536, |
|
"loss": 0.6419, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 11.44, |
|
"learning_rate": 0.00031810897435897436, |
|
"loss": 0.6614, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 11.46, |
|
"learning_rate": 0.0003177083333333333, |
|
"loss": 0.7037, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 11.49, |
|
"learning_rate": 0.0003173076923076923, |
|
"loss": 0.6944, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 11.51, |
|
"learning_rate": 0.00031690705128205126, |
|
"loss": 0.6818, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 11.54, |
|
"learning_rate": 0.00031650641025641026, |
|
"loss": 0.692, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 11.57, |
|
"learning_rate": 0.0003161057692307692, |
|
"loss": 0.7028, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 11.59, |
|
"learning_rate": 0.0003157051282051282, |
|
"loss": 0.7009, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 11.62, |
|
"learning_rate": 0.00031530448717948716, |
|
"loss": 0.7325, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 11.64, |
|
"learning_rate": 0.00031490384615384616, |
|
"loss": 0.6738, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 0.0003145032051282051, |
|
"loss": 0.7008, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 11.69, |
|
"learning_rate": 0.0003141025641025641, |
|
"loss": 0.6838, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 11.72, |
|
"learning_rate": 0.00031370192307692306, |
|
"loss": 0.726, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 11.74, |
|
"learning_rate": 0.00031330128205128206, |
|
"loss": 0.7167, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 11.77, |
|
"learning_rate": 0.000312900641025641, |
|
"loss": 0.7236, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 11.79, |
|
"learning_rate": 0.0003125, |
|
"loss": 0.7076, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"learning_rate": 0.000312099358974359, |
|
"loss": 0.7415, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 11.84, |
|
"learning_rate": 0.00031169871794871796, |
|
"loss": 0.7313, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 11.87, |
|
"learning_rate": 0.00031129807692307696, |
|
"loss": 0.7403, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 11.89, |
|
"learning_rate": 0.0003108974358974359, |
|
"loss": 0.7145, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 11.92, |
|
"learning_rate": 0.0003104967948717949, |
|
"loss": 0.7532, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 11.94, |
|
"learning_rate": 0.00031009615384615385, |
|
"loss": 0.7326, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 11.97, |
|
"learning_rate": 0.00030969551282051286, |
|
"loss": 0.7699, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 11.99, |
|
"learning_rate": 0.0003092948717948718, |
|
"loss": 0.7251, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 12.02, |
|
"learning_rate": 0.0003088942307692308, |
|
"loss": 0.5773, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 12.04, |
|
"learning_rate": 0.00030849358974358975, |
|
"loss": 0.5288, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 12.07, |
|
"learning_rate": 0.00030809294871794876, |
|
"loss": 0.5312, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 12.09, |
|
"learning_rate": 0.0003076923076923077, |
|
"loss": 0.5445, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 12.12, |
|
"learning_rate": 0.0003072916666666667, |
|
"loss": 0.5218, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 12.14, |
|
"learning_rate": 0.00030689102564102565, |
|
"loss": 0.5311, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 12.17, |
|
"learning_rate": 0.00030649038461538465, |
|
"loss": 0.5541, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 12.2, |
|
"learning_rate": 0.0003060897435897436, |
|
"loss": 0.5242, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 12.22, |
|
"learning_rate": 0.0003056891025641026, |
|
"loss": 0.5302, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 12.25, |
|
"learning_rate": 0.00030528846153846155, |
|
"loss": 0.536, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 12.27, |
|
"learning_rate": 0.00030488782051282055, |
|
"loss": 0.5545, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 12.3, |
|
"learning_rate": 0.00030448717948717945, |
|
"loss": 0.5476, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 12.32, |
|
"learning_rate": 0.00030408653846153845, |
|
"loss": 0.5528, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 12.35, |
|
"learning_rate": 0.0003036858974358974, |
|
"loss": 0.5633, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 12.37, |
|
"learning_rate": 0.0003032852564102564, |
|
"loss": 0.571, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 12.4, |
|
"learning_rate": 0.00030288461538461535, |
|
"loss": 0.5637, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 12.42, |
|
"learning_rate": 0.00030248397435897435, |
|
"loss": 0.5613, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 12.45, |
|
"learning_rate": 0.0003020833333333333, |
|
"loss": 0.5999, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 12.47, |
|
"learning_rate": 0.0003016826923076923, |
|
"loss": 0.5633, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 0.0003012820512820513, |
|
"loss": 0.5811, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 12.52, |
|
"learning_rate": 0.00030088141025641025, |
|
"loss": 0.5711, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 12.55, |
|
"learning_rate": 0.00030048076923076925, |
|
"loss": 0.5669, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 12.57, |
|
"learning_rate": 0.0003000801282051282, |
|
"loss": 0.5424, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 0.0002996794871794872, |
|
"loss": 0.5592, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 12.62, |
|
"learning_rate": 0.00029927884615384614, |
|
"loss": 0.5645, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 12.65, |
|
"learning_rate": 0.00029887820512820515, |
|
"loss": 0.6089, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 0.0002984775641025641, |
|
"loss": 0.57, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 0.0002980769230769231, |
|
"loss": 0.57, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 12.72, |
|
"learning_rate": 0.00029767628205128204, |
|
"loss": 0.5511, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 12.75, |
|
"learning_rate": 0.00029727564102564105, |
|
"loss": 0.5765, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 12.77, |
|
"learning_rate": 0.000296875, |
|
"loss": 0.5896, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 0.000296474358974359, |
|
"loss": 0.6329, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 12.83, |
|
"learning_rate": 0.00029607371794871794, |
|
"loss": 0.5682, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 12.85, |
|
"learning_rate": 0.00029567307692307694, |
|
"loss": 0.58, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 12.88, |
|
"learning_rate": 0.0002952724358974359, |
|
"loss": 0.6014, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 0.0002948717948717949, |
|
"loss": 0.6082, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 12.93, |
|
"learning_rate": 0.00029447115384615384, |
|
"loss": 0.6392, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 12.95, |
|
"learning_rate": 0.00029407051282051284, |
|
"loss": 0.609, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 12.98, |
|
"learning_rate": 0.0002936698717948718, |
|
"loss": 0.6247, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 0.0002932692307692308, |
|
"loss": 0.5943, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 13.03, |
|
"learning_rate": 0.00029286858974358974, |
|
"loss": 0.4499, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 13.05, |
|
"learning_rate": 0.00029246794871794874, |
|
"loss": 0.4412, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 13.08, |
|
"learning_rate": 0.0002920673076923077, |
|
"loss": 0.4409, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 13.1, |
|
"learning_rate": 0.0002916666666666667, |
|
"loss": 0.4505, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 13.13, |
|
"learning_rate": 0.0002912660256410257, |
|
"loss": 0.4489, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 13.15, |
|
"learning_rate": 0.00029086538461538464, |
|
"loss": 0.4235, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"learning_rate": 0.00029046474358974364, |
|
"loss": 0.4276, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 0.0002900641025641026, |
|
"loss": 0.4544, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 13.23, |
|
"learning_rate": 0.0002896634615384616, |
|
"loss": 0.4625, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 13.25, |
|
"learning_rate": 0.00028926282051282054, |
|
"loss": 0.4266, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 13.28, |
|
"learning_rate": 0.0002888621794871795, |
|
"loss": 0.4459, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 0.00028846153846153843, |
|
"loss": 0.4261, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 0.00028806089743589744, |
|
"loss": 0.4376, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 13.35, |
|
"learning_rate": 0.0002876602564102564, |
|
"loss": 0.4418, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 13.38, |
|
"learning_rate": 0.0002872596153846154, |
|
"loss": 0.4667, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 0.00028685897435897433, |
|
"loss": 0.4504, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 13.43, |
|
"learning_rate": 0.00028645833333333333, |
|
"loss": 0.4691, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 13.46, |
|
"learning_rate": 0.0002860576923076923, |
|
"loss": 0.4684, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 13.48, |
|
"learning_rate": 0.0002856570512820513, |
|
"loss": 0.467, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 13.51, |
|
"learning_rate": 0.00028525641025641023, |
|
"loss": 0.4716, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 13.53, |
|
"learning_rate": 0.00028485576923076923, |
|
"loss": 0.4619, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 13.56, |
|
"learning_rate": 0.0002844551282051282, |
|
"loss": 0.4775, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 13.58, |
|
"learning_rate": 0.0002840544871794872, |
|
"loss": 0.4568, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 13.61, |
|
"learning_rate": 0.00028365384615384613, |
|
"loss": 0.4763, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 13.63, |
|
"learning_rate": 0.00028325320512820513, |
|
"loss": 0.466, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 13.66, |
|
"learning_rate": 0.0002828525641025641, |
|
"loss": 0.4543, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 13.68, |
|
"learning_rate": 0.0002824519230769231, |
|
"loss": 0.4666, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 13.71, |
|
"learning_rate": 0.00028205128205128203, |
|
"loss": 0.489, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 13.73, |
|
"learning_rate": 0.00028165064102564103, |
|
"loss": 0.491, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 13.76, |
|
"learning_rate": 0.00028125000000000003, |
|
"loss": 0.4847, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 13.78, |
|
"learning_rate": 0.000280849358974359, |
|
"loss": 0.4899, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 13.81, |
|
"learning_rate": 0.000280448717948718, |
|
"loss": 0.5169, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 13.83, |
|
"learning_rate": 0.00028004807692307693, |
|
"loss": 0.4968, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 13.86, |
|
"learning_rate": 0.00027964743589743593, |
|
"loss": 0.4774, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 13.88, |
|
"learning_rate": 0.0002792467948717949, |
|
"loss": 0.5105, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 13.91, |
|
"learning_rate": 0.0002788461538461539, |
|
"loss": 0.4997, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 13.93, |
|
"learning_rate": 0.00027844551282051283, |
|
"loss": 0.5088, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 13.96, |
|
"learning_rate": 0.00027804487179487183, |
|
"loss": 0.5091, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 13.98, |
|
"learning_rate": 0.0002776442307692308, |
|
"loss": 0.532, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 14.01, |
|
"learning_rate": 0.0002772435897435898, |
|
"loss": 0.4487, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 14.03, |
|
"learning_rate": 0.00027684294871794873, |
|
"loss": 0.3525, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 14.06, |
|
"learning_rate": 0.00027644230769230773, |
|
"loss": 0.3715, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 14.09, |
|
"learning_rate": 0.0002760416666666667, |
|
"loss": 0.3547, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 14.11, |
|
"learning_rate": 0.0002756410256410257, |
|
"loss": 0.3862, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 14.14, |
|
"learning_rate": 0.0002752403846153846, |
|
"loss": 0.3514, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 14.16, |
|
"learning_rate": 0.00027483974358974363, |
|
"loss": 0.362, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 14.19, |
|
"learning_rate": 0.0002744391025641026, |
|
"loss": 0.3457, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 14.21, |
|
"learning_rate": 0.0002740384615384616, |
|
"loss": 0.3662, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 14.24, |
|
"learning_rate": 0.0002736378205128205, |
|
"loss": 0.3746, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 14.26, |
|
"learning_rate": 0.00027323717948717947, |
|
"loss": 0.3388, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 14.29, |
|
"learning_rate": 0.0002728365384615384, |
|
"loss": 0.3824, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 14.31, |
|
"learning_rate": 0.0002724358974358974, |
|
"loss": 0.3584, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 14.34, |
|
"learning_rate": 0.00027203525641025637, |
|
"loss": 0.3544, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 14.36, |
|
"learning_rate": 0.00027163461538461537, |
|
"loss": 0.3721, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 14.39, |
|
"learning_rate": 0.0002712339743589743, |
|
"loss": 0.3769, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 14.41, |
|
"learning_rate": 0.0002708333333333333, |
|
"loss": 0.3872, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 14.44, |
|
"learning_rate": 0.0002704326923076923, |
|
"loss": 0.363, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 14.46, |
|
"learning_rate": 0.00027003205128205127, |
|
"loss": 0.3842, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 14.49, |
|
"learning_rate": 0.00026963141025641027, |
|
"loss": 0.3737, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 14.51, |
|
"learning_rate": 0.0002692307692307692, |
|
"loss": 0.3981, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 14.54, |
|
"learning_rate": 0.0002688301282051282, |
|
"loss": 0.3656, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 14.56, |
|
"learning_rate": 0.00026842948717948717, |
|
"loss": 0.3852, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 14.59, |
|
"learning_rate": 0.00026802884615384617, |
|
"loss": 0.3865, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 14.61, |
|
"learning_rate": 0.0002676282051282051, |
|
"loss": 0.3747, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 14.64, |
|
"learning_rate": 0.0002672275641025641, |
|
"loss": 0.3729, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 14.66, |
|
"learning_rate": 0.00026682692307692307, |
|
"loss": 0.3785, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 14.69, |
|
"learning_rate": 0.00026642628205128207, |
|
"loss": 0.4018, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 14.71, |
|
"learning_rate": 0.000266025641025641, |
|
"loss": 0.3854, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 14.74, |
|
"learning_rate": 0.000265625, |
|
"loss": 0.4161, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 14.77, |
|
"learning_rate": 0.00026522435897435897, |
|
"loss": 0.4093, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 14.79, |
|
"learning_rate": 0.00026482371794871797, |
|
"loss": 0.4126, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 14.82, |
|
"learning_rate": 0.0002644230769230769, |
|
"loss": 0.3977, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 14.84, |
|
"learning_rate": 0.0002640224358974359, |
|
"loss": 0.4286, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 14.87, |
|
"learning_rate": 0.00026362179487179487, |
|
"loss": 0.4371, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 14.89, |
|
"learning_rate": 0.00026322115384615387, |
|
"loss": 0.4066, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 14.92, |
|
"learning_rate": 0.0002628205128205128, |
|
"loss": 0.433, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 14.94, |
|
"learning_rate": 0.0002624198717948718, |
|
"loss": 0.4361, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 14.97, |
|
"learning_rate": 0.00026201923076923076, |
|
"loss": 0.4276, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 14.99, |
|
"learning_rate": 0.00026161858974358977, |
|
"loss": 0.4401, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 15.02, |
|
"learning_rate": 0.0002612179487179487, |
|
"loss": 0.3514, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 15.04, |
|
"learning_rate": 0.0002608173076923077, |
|
"loss": 0.306, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 15.07, |
|
"learning_rate": 0.0002604166666666667, |
|
"loss": 0.2862, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 15.09, |
|
"learning_rate": 0.00026001602564102566, |
|
"loss": 0.3042, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 15.12, |
|
"learning_rate": 0.00025961538461538467, |
|
"loss": 0.2978, |
|
"step": 600 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 1248, |
|
"num_train_epochs": 32, |
|
"save_steps": 100, |
|
"total_flos": 3.374598554368205e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|