|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9987389659520807, |
|
"global_step": 594, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0, |
|
"loss": 4.8182, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 6e-07, |
|
"loss": 5.0028, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.2e-06, |
|
"loss": 4.8689, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.8e-06, |
|
"loss": 4.8344, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.4e-06, |
|
"loss": 4.7712, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.9999999999999997e-06, |
|
"loss": 4.7584, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 3.6e-06, |
|
"loss": 4.8249, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 4.2e-06, |
|
"loss": 4.6967, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.8e-06, |
|
"loss": 4.7905, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 5.399999999999999e-06, |
|
"loss": 4.6558, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 5.999999999999999e-06, |
|
"loss": 4.6367, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 6.599999999999999e-06, |
|
"loss": 4.646, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 7.2e-06, |
|
"loss": 4.543, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 7.799999999999998e-06, |
|
"loss": 4.4923, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 8.4e-06, |
|
"loss": 4.3786, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 8.999999999999999e-06, |
|
"loss": 4.5164, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 9.6e-06, |
|
"loss": 4.5063, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.02e-05, |
|
"loss": 4.5819, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.0799999999999998e-05, |
|
"loss": 4.3233, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.14e-05, |
|
"loss": 4.5417, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.1999999999999999e-05, |
|
"loss": 4.4638, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.26e-05, |
|
"loss": 4.4333, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.3199999999999997e-05, |
|
"loss": 4.4354, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.3799999999999998e-05, |
|
"loss": 4.4696, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.44e-05, |
|
"loss": 4.2463, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.4999999999999999e-05, |
|
"loss": 4.6625, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.5599999999999996e-05, |
|
"loss": 4.3226, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.6199999999999997e-05, |
|
"loss": 4.2108, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.68e-05, |
|
"loss": 4.2346, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.74e-05, |
|
"loss": 4.3936, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.7999999999999997e-05, |
|
"loss": 4.4266, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.8599999999999998e-05, |
|
"loss": 4.2831, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.92e-05, |
|
"loss": 4.2859, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.98e-05, |
|
"loss": 4.2428, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.04e-05, |
|
"loss": 4.295, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.1e-05, |
|
"loss": 4.452, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.1599999999999996e-05, |
|
"loss": 4.2423, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.2199999999999998e-05, |
|
"loss": 4.4074, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.28e-05, |
|
"loss": 4.556, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.34e-05, |
|
"loss": 4.2127, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.3999999999999997e-05, |
|
"loss": 4.2372, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.4599999999999998e-05, |
|
"loss": 4.4704, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.52e-05, |
|
"loss": 4.1838, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.5799999999999997e-05, |
|
"loss": 4.3398, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.6399999999999995e-05, |
|
"loss": 4.4231, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.6999999999999996e-05, |
|
"loss": 4.4815, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.7599999999999997e-05, |
|
"loss": 4.3865, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.8199999999999998e-05, |
|
"loss": 4.7313, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.8199999999999998e-05, |
|
"loss": 4.7684, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.88e-05, |
|
"loss": 4.4833, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 2.94e-05, |
|
"loss": 4.193, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 4.2105, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 3.06e-05, |
|
"loss": 4.2301, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 3.119999999999999e-05, |
|
"loss": 4.3011, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 3.1799999999999994e-05, |
|
"loss": 4.1692, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 3.2399999999999995e-05, |
|
"loss": 4.3675, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.2999999999999996e-05, |
|
"loss": 4.1814, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.36e-05, |
|
"loss": 4.4746, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.42e-05, |
|
"loss": 4.2425, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.48e-05, |
|
"loss": 4.0318, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.539999999999999e-05, |
|
"loss": 4.2939, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.5999999999999994e-05, |
|
"loss": 4.3447, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.6599999999999995e-05, |
|
"loss": 4.2286, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.7199999999999996e-05, |
|
"loss": 4.2042, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.78e-05, |
|
"loss": 4.236, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.84e-05, |
|
"loss": 4.1772, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.9e-05, |
|
"loss": 4.142, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.96e-05, |
|
"loss": 4.1345, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.02e-05, |
|
"loss": 4.2305, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.08e-05, |
|
"loss": 4.2083, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.14e-05, |
|
"loss": 4.3085, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.2e-05, |
|
"loss": 4.2821, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.259999999999999e-05, |
|
"loss": 4.0847, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.319999999999999e-05, |
|
"loss": 4.2869, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.3799999999999994e-05, |
|
"loss": 4.2665, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.4399999999999995e-05, |
|
"loss": 4.24, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.4999999999999996e-05, |
|
"loss": 4.219, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.56e-05, |
|
"loss": 4.1975, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.62e-05, |
|
"loss": 4.1354, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.68e-05, |
|
"loss": 4.3206, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.7399999999999993e-05, |
|
"loss": 4.2772, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.7999999999999994e-05, |
|
"loss": 4.2269, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.8599999999999995e-05, |
|
"loss": 4.3281, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.9199999999999997e-05, |
|
"loss": 4.291, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.98e-05, |
|
"loss": 4.2093, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.04e-05, |
|
"loss": 4.1592, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.1e-05, |
|
"loss": 4.4026, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.1599999999999994e-05, |
|
"loss": 4.2414, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.2199999999999995e-05, |
|
"loss": 4.245, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.279999999999999e-05, |
|
"loss": 4.2845, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.339999999999999e-05, |
|
"loss": 4.247, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.399999999999999e-05, |
|
"loss": 4.3707, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.459999999999999e-05, |
|
"loss": 4.2318, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.519999999999999e-05, |
|
"loss": 4.4576, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.5799999999999994e-05, |
|
"loss": 4.3531, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.6399999999999995e-05, |
|
"loss": 4.409, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.6999999999999996e-05, |
|
"loss": 4.3879, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.76e-05, |
|
"loss": 4.309, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.82e-05, |
|
"loss": 4.5432, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.88e-05, |
|
"loss": 4.205, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.94e-05, |
|
"loss": 4.3588, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.9999999999999995e-05, |
|
"loss": 4.1904, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.0599999999999996e-05, |
|
"loss": 4.2077, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.12e-05, |
|
"loss": 4.13, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.18e-05, |
|
"loss": 4.1796, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.239999999999999e-05, |
|
"loss": 4.1452, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.299999999999999e-05, |
|
"loss": 4.2699, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.359999999999999e-05, |
|
"loss": 4.3016, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.419999999999999e-05, |
|
"loss": 4.4238, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.479999999999999e-05, |
|
"loss": 4.2019, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 6.539999999999999e-05, |
|
"loss": 4.2325, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 6.599999999999999e-05, |
|
"loss": 4.2623, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 6.659999999999999e-05, |
|
"loss": 4.2221, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 6.72e-05, |
|
"loss": 4.2787, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 6.78e-05, |
|
"loss": 4.2717, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.84e-05, |
|
"loss": 4.2513, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.9e-05, |
|
"loss": 4.3379, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.96e-05, |
|
"loss": 4.1303, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 7.02e-05, |
|
"loss": 4.1263, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 7.079999999999999e-05, |
|
"loss": 4.1342, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 7.139999999999999e-05, |
|
"loss": 4.3001, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.199999999999999e-05, |
|
"loss": 4.4657, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.259999999999999e-05, |
|
"loss": 4.194, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.319999999999999e-05, |
|
"loss": 4.2687, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.379999999999999e-05, |
|
"loss": 4.2877, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.439999999999999e-05, |
|
"loss": 4.1317, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.5e-05, |
|
"loss": 4.145, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.56e-05, |
|
"loss": 4.1317, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.62e-05, |
|
"loss": 4.2327, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.68e-05, |
|
"loss": 4.2944, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.74e-05, |
|
"loss": 4.254, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.8e-05, |
|
"loss": 4.2138, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.86e-05, |
|
"loss": 4.2627, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.92e-05, |
|
"loss": 4.3398, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.98e-05, |
|
"loss": 4.3485, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 8.04e-05, |
|
"loss": 4.2115, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 8.1e-05, |
|
"loss": 4.1802, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 8.16e-05, |
|
"loss": 4.24, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 8.22e-05, |
|
"loss": 4.2233, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.28e-05, |
|
"loss": 4.3909, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.34e-05, |
|
"loss": 4.2786, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.4e-05, |
|
"loss": 4.3874, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.459999999999998e-05, |
|
"loss": 4.4902, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.519999999999998e-05, |
|
"loss": 4.3232, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 8.579999999999998e-05, |
|
"loss": 4.3679, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.639999999999999e-05, |
|
"loss": 4.6866, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.699999999999999e-05, |
|
"loss": 4.6394, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.759999999999999e-05, |
|
"loss": 4.5798, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.819999999999999e-05, |
|
"loss": 4.6793, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.879999999999999e-05, |
|
"loss": 4.5903, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 8.939999999999999e-05, |
|
"loss": 4.2962, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 4.2278, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.059999999999999e-05, |
|
"loss": 4.2305, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.12e-05, |
|
"loss": 4.145, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.18e-05, |
|
"loss": 4.1925, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.24e-05, |
|
"loss": 4.1712, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.3e-05, |
|
"loss": 4.301, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.36e-05, |
|
"loss": 4.1469, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.419999999999999e-05, |
|
"loss": 4.0775, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.479999999999999e-05, |
|
"loss": 4.2078, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.539999999999999e-05, |
|
"loss": 4.2729, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.599999999999999e-05, |
|
"loss": 4.1514, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.659999999999999e-05, |
|
"loss": 4.1282, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.719999999999999e-05, |
|
"loss": 4.1626, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.779999999999999e-05, |
|
"loss": 4.3451, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.839999999999999e-05, |
|
"loss": 4.3345, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.9e-05, |
|
"loss": 4.1537, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.96e-05, |
|
"loss": 4.3872, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001002, |
|
"loss": 4.2078, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001008, |
|
"loss": 4.2435, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001014, |
|
"loss": 4.1005, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000102, |
|
"loss": 4.1444, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001026, |
|
"loss": 4.1539, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00010319999999999999, |
|
"loss": 4.2127, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00010379999999999999, |
|
"loss": 4.3975, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010439999999999999, |
|
"loss": 4.3712, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010499999999999999, |
|
"loss": 4.3348, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010559999999999998, |
|
"loss": 4.1479, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010619999999999998, |
|
"loss": 4.2947, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010679999999999998, |
|
"loss": 4.1779, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010739999999999998, |
|
"loss": 4.3328, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010799999999999998, |
|
"loss": 4.2, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010859999999999998, |
|
"loss": 4.2252, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010919999999999998, |
|
"loss": 4.1258, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010979999999999999, |
|
"loss": 4.4858, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00011039999999999999, |
|
"loss": 4.2418, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00011099999999999999, |
|
"loss": 4.2566, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00011159999999999999, |
|
"loss": 4.3974, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00011219999999999999, |
|
"loss": 4.4813, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00011279999999999999, |
|
"loss": 4.1671, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00011339999999999999, |
|
"loss": 4.4851, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00011399999999999999, |
|
"loss": 4.3957, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001146, |
|
"loss": 4.6878, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001152, |
|
"loss": 4.3941, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001158, |
|
"loss": 4.3453, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001164, |
|
"loss": 4.4555, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000117, |
|
"loss": 4.7167, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001176, |
|
"loss": 4.538, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001182, |
|
"loss": 4.5329, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001188, |
|
"loss": 4.5335, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001194, |
|
"loss": 4.1187, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00011999999999999999, |
|
"loss": 4.2866, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00012059999999999999, |
|
"loss": 4.3993, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00012119999999999999, |
|
"loss": 4.1615, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00012179999999999999, |
|
"loss": 4.226, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001224, |
|
"loss": 4.1952, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00012299999999999998, |
|
"loss": 4.2312, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001236, |
|
"loss": 4.3538, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00012419999999999998, |
|
"loss": 4.265, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00012479999999999997, |
|
"loss": 4.3468, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00012539999999999999, |
|
"loss": 4.3613, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00012599999999999997, |
|
"loss": 4.2659, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0001266, |
|
"loss": 4.2808, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00012719999999999997, |
|
"loss": 4.3405, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0001278, |
|
"loss": 4.2305, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00012839999999999998, |
|
"loss": 4.277, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000129, |
|
"loss": 4.1606, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00012959999999999998, |
|
"loss": 4.3264, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001302, |
|
"loss": 4.4251, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00013079999999999998, |
|
"loss": 4.4401, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001314, |
|
"loss": 4.2936, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00013199999999999998, |
|
"loss": 4.3649, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001326, |
|
"loss": 4.3694, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00013319999999999999, |
|
"loss": 4.3328, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001338, |
|
"loss": 4.5794, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001344, |
|
"loss": 4.2256, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000135, |
|
"loss": 4.1422, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001356, |
|
"loss": 4.2117, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001362, |
|
"loss": 4.4223, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001368, |
|
"loss": 4.384, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001374, |
|
"loss": 4.1981, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000138, |
|
"loss": 4.2797, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001386, |
|
"loss": 4.5837, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001392, |
|
"loss": 4.2845, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00013979999999999998, |
|
"loss": 4.4128, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001404, |
|
"loss": 4.1982, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00014099999999999998, |
|
"loss": 4.158, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00014159999999999997, |
|
"loss": 4.2719, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001422, |
|
"loss": 4.3127, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00014279999999999997, |
|
"loss": 4.3545, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001434, |
|
"loss": 4.5494, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00014399999999999998, |
|
"loss": 4.4465, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001446, |
|
"loss": 4.2999, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00014519999999999998, |
|
"loss": 4.4911, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001458, |
|
"loss": 4.7491, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00014639999999999998, |
|
"loss": 4.5465, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000147, |
|
"loss": 4.9226, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00014759999999999998, |
|
"loss": 4.895, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001482, |
|
"loss": 4.4854, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00014879999999999998, |
|
"loss": 4.6152, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001494, |
|
"loss": 4.4272, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00015, |
|
"loss": 4.582, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00015059999999999997, |
|
"loss": 4.7457, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001512, |
|
"loss": 4.4816, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00015179999999999998, |
|
"loss": 4.8845, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001524, |
|
"loss": 4.8351, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00015299999999999998, |
|
"loss": 4.5857, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001536, |
|
"loss": 4.389, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00015419999999999998, |
|
"loss": 4.4282, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001548, |
|
"loss": 4.647, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00015539999999999998, |
|
"loss": 4.6697, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000156, |
|
"loss": 4.5029, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00015659999999999998, |
|
"loss": 4.8514, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001572, |
|
"loss": 4.1349, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001578, |
|
"loss": 4.3635, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001584, |
|
"loss": 4.5442, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000159, |
|
"loss": 5.1162, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001596, |
|
"loss": 4.4451, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001602, |
|
"loss": 4.5503, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001608, |
|
"loss": 4.4475, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001614, |
|
"loss": 4.3478, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.000162, |
|
"loss": 4.6113, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001626, |
|
"loss": 4.7242, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001632, |
|
"loss": 4.6371, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001638, |
|
"loss": 4.4794, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001644, |
|
"loss": 4.3902, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000165, |
|
"loss": 4.5252, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001656, |
|
"loss": 4.5436, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001662, |
|
"loss": 4.4553, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001668, |
|
"loss": 4.5571, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001674, |
|
"loss": 4.5108, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000168, |
|
"loss": 4.5576, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0001686, |
|
"loss": 4.4762, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00016919999999999997, |
|
"loss": 4.2964, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00016979999999999998, |
|
"loss": 4.5742, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00017039999999999997, |
|
"loss": 4.2314, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00017099999999999998, |
|
"loss": 4.7449, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00017159999999999997, |
|
"loss": 4.3179, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017219999999999998, |
|
"loss": 4.3885, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017279999999999997, |
|
"loss": 4.6569, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017339999999999996, |
|
"loss": 4.5262, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017399999999999997, |
|
"loss": 4.3064, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017459999999999996, |
|
"loss": 4.6379, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00017519999999999998, |
|
"loss": 4.6143, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017579999999999996, |
|
"loss": 4.4993, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017639999999999998, |
|
"loss": 4.348, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017699999999999997, |
|
"loss": 5.2729, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017759999999999998, |
|
"loss": 5.2359, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017819999999999997, |
|
"loss": 4.8578, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00017879999999999998, |
|
"loss": 5.5686, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00017939999999999997, |
|
"loss": 4.6844, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 4.5929, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018059999999999997, |
|
"loss": 4.7074, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018119999999999999, |
|
"loss": 4.4101, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018179999999999997, |
|
"loss": 4.5093, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0001824, |
|
"loss": 4.725, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018299999999999998, |
|
"loss": 4.6516, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0001836, |
|
"loss": 4.7071, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018419999999999998, |
|
"loss": 4.4795, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0001848, |
|
"loss": 4.4157, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018539999999999998, |
|
"loss": 4.2875, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.000186, |
|
"loss": 4.5176, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018659999999999998, |
|
"loss": 4.4679, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0001872, |
|
"loss": 4.6015, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018779999999999998, |
|
"loss": 4.338, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018839999999999997, |
|
"loss": 4.6336, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018899999999999999, |
|
"loss": 4.4357, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018959999999999997, |
|
"loss": 4.8962, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001902, |
|
"loss": 4.5626, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00019079999999999998, |
|
"loss": 4.5467, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001914, |
|
"loss": 4.2766, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00019199999999999998, |
|
"loss": 4.3254, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001926, |
|
"loss": 4.8041, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00019319999999999998, |
|
"loss": 4.3947, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0001938, |
|
"loss": 4.7643, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019439999999999998, |
|
"loss": 4.4117, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000195, |
|
"loss": 4.7301, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019559999999999998, |
|
"loss": 4.4717, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0001962, |
|
"loss": 4.4449, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019679999999999999, |
|
"loss": 4.4055, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001974, |
|
"loss": 4.5839, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.000198, |
|
"loss": 4.2595, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001986, |
|
"loss": 4.4989, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001992, |
|
"loss": 4.5707, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001998, |
|
"loss": 4.4243, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0002004, |
|
"loss": 4.4127, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.000201, |
|
"loss": 4.4754, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0002016, |
|
"loss": 4.3751, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0002022, |
|
"loss": 4.4543, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0002028, |
|
"loss": 4.6159, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00020339999999999998, |
|
"loss": 4.4517, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.000204, |
|
"loss": 4.7063, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00020459999999999999, |
|
"loss": 4.7711, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0002052, |
|
"loss": 4.6853, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0002058, |
|
"loss": 4.8209, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00020639999999999998, |
|
"loss": 4.3988, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00020699999999999996, |
|
"loss": 4.5112, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00020759999999999998, |
|
"loss": 4.6472, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00020819999999999996, |
|
"loss": 4.4794, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00020879999999999998, |
|
"loss": 5.2373, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00020939999999999997, |
|
"loss": 4.5732, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00020999999999999998, |
|
"loss": 4.5354, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00021059999999999997, |
|
"loss": 4.5812, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021119999999999996, |
|
"loss": 4.4975, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021179999999999997, |
|
"loss": 4.4032, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021239999999999996, |
|
"loss": 4.3613, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021299999999999997, |
|
"loss": 4.3984, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021359999999999996, |
|
"loss": 4.7058, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00021419999999999998, |
|
"loss": 4.3473, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021479999999999996, |
|
"loss": 4.6245, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021539999999999998, |
|
"loss": 4.6861, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021599999999999996, |
|
"loss": 4.4029, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021659999999999998, |
|
"loss": 4.7367, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021719999999999997, |
|
"loss": 4.4922, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00021779999999999998, |
|
"loss": 4.7612, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00021839999999999997, |
|
"loss": 4.447, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00021899999999999998, |
|
"loss": 4.5431, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00021959999999999997, |
|
"loss": 4.3665, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00022019999999999999, |
|
"loss": 4.4973, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00022079999999999997, |
|
"loss": 4.8256, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0002214, |
|
"loss": 4.2738, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00022199999999999998, |
|
"loss": 4.4241, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0002226, |
|
"loss": 4.3496, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00022319999999999998, |
|
"loss": 4.6338, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0002238, |
|
"loss": 4.5995, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00022439999999999998, |
|
"loss": 4.5976, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.000225, |
|
"loss": 4.572, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022559999999999998, |
|
"loss": 4.6357, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022619999999999997, |
|
"loss": 4.5382, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022679999999999998, |
|
"loss": 4.5339, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022739999999999997, |
|
"loss": 4.6268, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022799999999999999, |
|
"loss": 4.7323, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00022859999999999997, |
|
"loss": 4.7925, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0002292, |
|
"loss": 4.5124, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00022979999999999997, |
|
"loss": 4.706, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0002304, |
|
"loss": 4.8178, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00023099999999999998, |
|
"loss": 4.981, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0002316, |
|
"loss": 4.4796, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00023219999999999998, |
|
"loss": 4.4747, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0002328, |
|
"loss": 4.5041, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00023339999999999998, |
|
"loss": 4.5166, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.000234, |
|
"loss": 4.7084, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00023459999999999998, |
|
"loss": 4.9243, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0002352, |
|
"loss": 4.4673, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00023579999999999999, |
|
"loss": 4.9729, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0002364, |
|
"loss": 4.6977, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.000237, |
|
"loss": 4.9387, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0002376, |
|
"loss": 5.7888, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0002382, |
|
"loss": 4.9183, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0002388, |
|
"loss": 5.0151, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0002394, |
|
"loss": 4.7088, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 4.644, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0002406, |
|
"loss": 4.5208, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00024119999999999998, |
|
"loss": 4.4707, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0002418, |
|
"loss": 4.4193, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00024239999999999998, |
|
"loss": 4.3914, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000243, |
|
"loss": 4.6832, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00024359999999999999, |
|
"loss": 4.3622, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00024419999999999997, |
|
"loss": 4.5366, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0002448, |
|
"loss": 4.4287, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00024539999999999995, |
|
"loss": 4.3764, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00024599999999999996, |
|
"loss": 4.6033, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0002466, |
|
"loss": 4.4157, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0002472, |
|
"loss": 4.5498, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00024779999999999995, |
|
"loss": 4.4146, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00024839999999999997, |
|
"loss": 4.4638, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000249, |
|
"loss": 4.4486, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00024959999999999994, |
|
"loss": 4.1511, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00025019999999999996, |
|
"loss": 4.6831, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00025079999999999997, |
|
"loss": 4.502, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0002514, |
|
"loss": 4.196, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00025199999999999995, |
|
"loss": 4.4275, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00025259999999999996, |
|
"loss": 4.5887, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0002532, |
|
"loss": 4.4421, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0002538, |
|
"loss": 4.4905, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00025439999999999995, |
|
"loss": 4.4904, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00025499999999999996, |
|
"loss": 4.2661, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0002556, |
|
"loss": 4.2795, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0002562, |
|
"loss": 4.8814, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00025679999999999995, |
|
"loss": 4.5092, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00025739999999999997, |
|
"loss": 4.7066, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.000258, |
|
"loss": 4.22, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0002586, |
|
"loss": 4.4888, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00025919999999999996, |
|
"loss": 4.5165, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00025979999999999997, |
|
"loss": 4.6135, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0002604, |
|
"loss": 4.4378, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.000261, |
|
"loss": 4.6876, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00026159999999999996, |
|
"loss": 4.5232, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0002622, |
|
"loss": 4.6054, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0002628, |
|
"loss": 4.6991, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00026339999999999995, |
|
"loss": 4.7554, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00026399999999999997, |
|
"loss": 4.8751, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0002646, |
|
"loss": 4.8587, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0002652, |
|
"loss": 4.5289, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00026579999999999996, |
|
"loss": 4.3795, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00026639999999999997, |
|
"loss": 4.7699, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.000267, |
|
"loss": 4.5368, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0002676, |
|
"loss": 4.6414, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00026819999999999996, |
|
"loss": 5.1838, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0002688, |
|
"loss": 4.3724, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0002694, |
|
"loss": 4.4935, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00027, |
|
"loss": 4.6275, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00027059999999999996, |
|
"loss": 4.4756, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0002712, |
|
"loss": 4.7369, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002718, |
|
"loss": 4.6098, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002724, |
|
"loss": 4.6067, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00027299999999999997, |
|
"loss": 4.8474, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002736, |
|
"loss": 4.6679, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002742, |
|
"loss": 4.7952, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002748, |
|
"loss": 4.5183, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00027539999999999997, |
|
"loss": 4.8535, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.000276, |
|
"loss": 4.4351, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0002766, |
|
"loss": 4.7839, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0002772, |
|
"loss": 4.6477, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0002778, |
|
"loss": 4.8635, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0002784, |
|
"loss": 4.4758, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.000279, |
|
"loss": 4.5836, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00027959999999999997, |
|
"loss": 4.4608, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0002802, |
|
"loss": 4.7147, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0002808, |
|
"loss": 4.3855, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00028139999999999996, |
|
"loss": 4.5603, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00028199999999999997, |
|
"loss": 4.3725, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0002826, |
|
"loss": 4.4598, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00028319999999999994, |
|
"loss": 4.6407, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00028379999999999996, |
|
"loss": 4.4258, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0002844, |
|
"loss": 4.6098, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.000285, |
|
"loss": 4.5606, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00028559999999999995, |
|
"loss": 4.5687, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00028619999999999996, |
|
"loss": 4.3645, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0002868, |
|
"loss": 4.3326, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00028739999999999994, |
|
"loss": 4.9138, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00028799999999999995, |
|
"loss": 4.992, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00028859999999999997, |
|
"loss": 4.7403, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0002892, |
|
"loss": 4.7293, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00028979999999999994, |
|
"loss": 4.5899, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00029039999999999996, |
|
"loss": 4.4421, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00029099999999999997, |
|
"loss": 4.6241, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0002916, |
|
"loss": 4.4847, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00029219999999999995, |
|
"loss": 4.5834, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00029279999999999996, |
|
"loss": 4.4479, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002934, |
|
"loss": 4.7391, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.000294, |
|
"loss": 4.7008, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029459999999999995, |
|
"loss": 4.3374, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029519999999999997, |
|
"loss": 4.378, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002958, |
|
"loss": 4.636, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002964, |
|
"loss": 4.8833, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00029699999999999996, |
|
"loss": 4.5708, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00029759999999999997, |
|
"loss": 4.6291, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002982, |
|
"loss": 5.1499, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002988, |
|
"loss": 4.7669, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_loss": 4.873856067657471, |
|
"eval_runtime": 784.3492, |
|
"eval_samples_per_second": 3.368, |
|
"eval_steps_per_second": 0.282, |
|
"eval_wer": 1.9278262594208648, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00029939999999999996, |
|
"loss": 4.5736, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0003, |
|
"loss": 4.6392, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00029680851063829784, |
|
"loss": 4.7407, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002936170212765957, |
|
"loss": 4.9224, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002904255319148936, |
|
"loss": 4.7926, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002872340425531915, |
|
"loss": 5.0733, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00028404255319148934, |
|
"loss": 4.7467, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002808510638297872, |
|
"loss": 4.7175, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00027765957446808506, |
|
"loss": 4.6484, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.000274468085106383, |
|
"loss": 4.7577, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00027127659574468084, |
|
"loss": 4.7992, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002680851063829787, |
|
"loss": 4.5604, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00026489361702127656, |
|
"loss": 4.5546, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002617021276595745, |
|
"loss": 4.9298, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002585106382978723, |
|
"loss": 4.6264, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002553191489361702, |
|
"loss": 4.6125, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00025212765957446806, |
|
"loss": 4.5963, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002489361702127659, |
|
"loss": 4.7157, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00024574468085106384, |
|
"loss": 4.5033, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00024255319148936167, |
|
"loss": 4.5918, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00023936170212765956, |
|
"loss": 4.6696, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00023617021276595742, |
|
"loss": 4.8345, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00023297872340425529, |
|
"loss": 4.3613, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00022978723404255317, |
|
"loss": 4.1583, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00022659574468085106, |
|
"loss": 4.5778, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0002234042553191489, |
|
"loss": 4.7744, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00022021276595744679, |
|
"loss": 4.6864, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00021702127659574468, |
|
"loss": 4.4407, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00021382978723404254, |
|
"loss": 4.6176, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0002106382978723404, |
|
"loss": 4.3369, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0002074468085106383, |
|
"loss": 4.4068, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00020425531914893615, |
|
"loss": 4.7584, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00020106382978723404, |
|
"loss": 4.6745, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00019787234042553187, |
|
"loss": 4.4945, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00019468085106382976, |
|
"loss": 4.3341, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00019148936170212765, |
|
"loss": 4.5124, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001882978723404255, |
|
"loss": 4.5719, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001851063829787234, |
|
"loss": 4.3806, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00018191489361702126, |
|
"loss": 4.623, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00017872340425531912, |
|
"loss": 4.5661, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.000175531914893617, |
|
"loss": 4.6673, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001723404255319149, |
|
"loss": 4.7293, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016914893617021274, |
|
"loss": 4.4276, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016595744680851062, |
|
"loss": 4.4851, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00016276595744680849, |
|
"loss": 4.5706, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015957446808510637, |
|
"loss": 4.7982, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015638297872340426, |
|
"loss": 4.4439, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001531914893617021, |
|
"loss": 5.0839, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015, |
|
"loss": 5.1486, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014680851063829785, |
|
"loss": 4.481, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00014361702127659574, |
|
"loss": 4.5966, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001404255319148936, |
|
"loss": 4.7382, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001372340425531915, |
|
"loss": 4.5965, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00013404255319148935, |
|
"loss": 4.3645, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00013085106382978724, |
|
"loss": 4.4921, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001276595744680851, |
|
"loss": 4.547, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00012446808510638296, |
|
"loss": 4.6281, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00012127659574468084, |
|
"loss": 4.5267, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011808510638297871, |
|
"loss": 4.3407, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011489361702127659, |
|
"loss": 4.2849, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011170212765957445, |
|
"loss": 4.7138, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010851063829787234, |
|
"loss": 4.3811, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.0001053191489361702, |
|
"loss": 4.4681, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00010212765957446807, |
|
"loss": 4.7141, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.893617021276594e-05, |
|
"loss": 4.5566, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.574468085106382e-05, |
|
"loss": 4.5957, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.25531914893617e-05, |
|
"loss": 4.4626, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.936170212765956e-05, |
|
"loss": 4.2414, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.617021276595745e-05, |
|
"loss": 4.3564, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.297872340425531e-05, |
|
"loss": 4.4139, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.978723404255319e-05, |
|
"loss": 4.5387, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.659574468085105e-05, |
|
"loss": 4.4192, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.340425531914892e-05, |
|
"loss": 4.5384, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 7.02127659574468e-05, |
|
"loss": 4.3597, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.702127659574467e-05, |
|
"loss": 4.3714, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.382978723404255e-05, |
|
"loss": 4.4526, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.063829787234042e-05, |
|
"loss": 4.4474, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.7446808510638294e-05, |
|
"loss": 4.188, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.425531914893617e-05, |
|
"loss": 4.3285, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 5.106382978723404e-05, |
|
"loss": 4.3842, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.787234042553191e-05, |
|
"loss": 4.5096, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.468085106382978e-05, |
|
"loss": 4.3595, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.1489361702127656e-05, |
|
"loss": 4.2159, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.8297872340425525e-05, |
|
"loss": 4.4105, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.51063829787234e-05, |
|
"loss": 4.3474, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 3.1914893617021275e-05, |
|
"loss": 4.2935, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.8723404255319147e-05, |
|
"loss": 4.5936, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.553191489361702e-05, |
|
"loss": 4.5503, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.234042553191489e-05, |
|
"loss": 4.4791, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.9148936170212762e-05, |
|
"loss": 4.3574, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.5957446808510637e-05, |
|
"loss": 4.5734, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.276595744680851e-05, |
|
"loss": 5.0126, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 9.574468085106381e-06, |
|
"loss": 4.6574, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 6.382978723404255e-06, |
|
"loss": 4.4898, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 594, |
|
"total_flos": 0.0, |
|
"train_loss": 4.478918995520081, |
|
"train_runtime": 5719.3723, |
|
"train_samples_per_second": 4.99, |
|
"train_steps_per_second": 0.104 |
|
} |
|
], |
|
"max_steps": 594, |
|
"num_train_epochs": 1, |
|
"total_flos": 0.0, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|