{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.995515695067265,
  "global_step": 1110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.01, "learning_rate": 0.0, "loss": 10.1809, "step": 1},
    {"epoch": 0.02, "learning_rate": 6e-07, "loss": 10.3677, "step": 2},
    {"epoch": 0.03, "learning_rate": 6e-07, "loss": 10.32, "step": 3},
    {"epoch": 0.04, "learning_rate": 1.2e-06, "loss": 10.153, "step": 4},
    {"epoch": 0.04, "learning_rate": 1.8e-06, "loss": 10.1314, "step": 5},
    {"epoch": 0.05, "learning_rate": 2.4e-06, "loss": 9.8055, "step": 6},
    {"epoch": 0.06, "learning_rate": 2.9999999999999997e-06, "loss": 9.6007, "step": 7},
    {"epoch": 0.07, "learning_rate": 3.6e-06, "loss": 9.0158, "step": 8},
    {"epoch": 0.08, "learning_rate": 4.2e-06, "loss": 8.4618, "step": 9},
    {"epoch": 0.09, "learning_rate": 4.8e-06, "loss": 8.2283, "step": 10},
    {"epoch": 0.1, "learning_rate": 5.399999999999999e-06, "loss": 7.7538, "step": 11},
    {"epoch": 0.11, "learning_rate": 5.999999999999999e-06, "loss": 7.5517, "step": 12},
    {"epoch": 0.12, "learning_rate": 6.599999999999999e-06, "loss": 7.3315, "step": 13},
    {"epoch": 0.13, "learning_rate": 7.2e-06, "loss": 7.1076, "step": 14},
    {"epoch": 0.13, "learning_rate": 7.799999999999998e-06, "loss": 6.8569, "step": 15},
    {"epoch": 0.14, "learning_rate": 8.4e-06, "loss": 6.7689, "step": 16},
    {"epoch": 0.15, "learning_rate": 8.999999999999999e-06, "loss": 6.5553, "step": 17},
    {"epoch": 0.16, "learning_rate": 9.6e-06, "loss": 6.3901, "step": 18},
    {"epoch": 0.17, "learning_rate": 1.02e-05, "loss": 6.3045, "step": 19},
    {"epoch": 0.18, "learning_rate": 1.0799999999999998e-05, "loss": 6.1671, "step": 20},
    {"epoch": 0.19, "learning_rate": 1.14e-05, "loss": 5.9445, "step": 21},
    {"epoch": 0.2, "learning_rate": 1.1999999999999999e-05, "loss": 5.8393, "step": 22},
    {"epoch": 0.21, "learning_rate": 1.26e-05, "loss": 5.8426, "step": 23},
    {"epoch": 0.22, "learning_rate": 1.3199999999999997e-05, "loss": 5.6518, "step": 24},
    {"epoch": 0.22, "learning_rate": 1.3799999999999998e-05, "loss": 5.7463, "step": 25},
    {"epoch": 0.23, "learning_rate": 1.44e-05, "loss": 5.6185, "step": 26},
    {"epoch": 0.24, "learning_rate": 1.4999999999999999e-05, "loss": 5.5114, "step": 27},
    {"epoch": 0.25, "learning_rate": 1.5599999999999996e-05, "loss": 5.3849, "step": 28},
    {"epoch": 0.26, "learning_rate": 1.6199999999999997e-05, "loss": 5.3192, "step": 29},
    {"epoch": 0.27, "learning_rate": 1.68e-05, "loss": 5.2528, "step": 30},
    {"epoch": 0.28, "learning_rate": 1.74e-05, "loss": 5.1951, "step": 31},
    {"epoch": 0.29, "learning_rate": 1.7999999999999997e-05, "loss": 5.1437, "step": 32},
    {"epoch": 0.3, "learning_rate": 1.8599999999999998e-05, "loss": 5.1332, "step": 33},
    {"epoch": 0.3, "learning_rate": 1.92e-05, "loss": 5.082, "step": 34},
    {"epoch": 0.31, "learning_rate": 1.98e-05, "loss": 4.9941, "step": 35},
    {"epoch": 0.32, "learning_rate": 2.04e-05, "loss": 4.977, "step": 36},
    {"epoch": 0.33, "learning_rate": 2.1e-05, "loss": 4.9351, "step": 37},
    {"epoch": 0.34, "learning_rate": 2.1599999999999996e-05, "loss": 4.9614, "step": 38},
    {"epoch": 0.35, "learning_rate": 2.2199999999999998e-05, "loss": 4.8991, "step": 39},
    {"epoch": 0.36, "learning_rate": 2.28e-05, "loss": 4.9255, "step": 40},
    {"epoch": 0.37, "learning_rate": 2.34e-05, "loss": 4.8665, "step": 41},
    {"epoch": 0.38, "learning_rate": 2.3999999999999997e-05, "loss": 4.8518, "step": 42},
    {"epoch": 0.39, "learning_rate": 2.4599999999999998e-05, "loss": 4.7914, "step": 43},
    {"epoch": 0.39, "learning_rate": 2.52e-05, "loss": 4.8202, "step": 44},
    {"epoch": 0.4, "learning_rate": 2.5799999999999997e-05, "loss": 4.9285, "step": 45},
    {"epoch": 0.41, "learning_rate": 2.6399999999999995e-05, "loss": 4.8156, "step": 46},
    {"epoch": 0.42, "learning_rate": 2.6999999999999996e-05, "loss": 4.8383, "step": 47},
    {"epoch": 0.43, "learning_rate": 2.7599999999999997e-05, "loss": 4.8539, "step": 48},
    {"epoch": 0.44, "learning_rate": 2.8199999999999998e-05, "loss": 4.9313, "step": 49},
    {"epoch": 0.45, "learning_rate": 2.88e-05, "loss": 4.9483, "step": 50},
    {"epoch": 0.46, "learning_rate": 2.94e-05, "loss": 5.2754, "step": 51},
    {"epoch": 0.47, "learning_rate": 2.9999999999999997e-05, "loss": 5.3344, "step": 52},
    {"epoch": 0.48, "learning_rate": 3.06e-05, "loss": 5.2162, "step": 53},
    {"epoch": 0.48, "learning_rate": 3.119999999999999e-05, "loss": 5.0826, "step": 54},
    {"epoch": 0.49, "learning_rate": 3.1799999999999994e-05, "loss": 4.8626, "step": 55},
    {"epoch": 0.5, "learning_rate": 3.2399999999999995e-05, "loss": 4.8319, "step": 56},
    {"epoch": 0.51, "learning_rate": 3.2999999999999996e-05, "loss": 4.771, "step": 57},
    {"epoch": 0.52, "learning_rate": 3.36e-05, "loss": 4.6895, "step": 58},
    {"epoch": 0.53, "learning_rate": 3.42e-05, "loss": 4.693, "step": 59},
    {"epoch": 0.54, "learning_rate": 3.48e-05, "loss": 4.7241, "step": 60},
    {"epoch": 0.55, "learning_rate": 3.539999999999999e-05, "loss": 4.7604, "step": 61},
    {"epoch": 0.56, "learning_rate": 3.5999999999999994e-05, "loss": 4.6755, "step": 62},
    {"epoch": 0.57, "learning_rate": 3.6599999999999995e-05, "loss": 4.6489, "step": 63},
    {"epoch": 0.57, "learning_rate": 3.7199999999999996e-05, "loss": 4.658, "step": 64},
    {"epoch": 0.58, "learning_rate": 3.78e-05, "loss": 4.6951, "step": 65},
    {"epoch": 0.59, "learning_rate": 3.84e-05, "loss": 4.5903, "step": 66},
    {"epoch": 0.6, "learning_rate": 3.9e-05, "loss": 4.6142, "step": 67},
    {"epoch": 0.61, "learning_rate": 3.96e-05, "loss": 4.6912, "step": 68},
    {"epoch": 0.62, "learning_rate": 4.02e-05, "loss": 4.6049, "step": 69},
    {"epoch": 0.63, "learning_rate": 4.08e-05, "loss": 4.5636, "step": 70},
    {"epoch": 0.64, "learning_rate": 4.14e-05, "loss": 4.6753, "step": 71},
    {"epoch": 0.65, "learning_rate": 4.2e-05, "loss": 4.7513, "step": 72},
    {"epoch": 0.65, "learning_rate": 4.259999999999999e-05, "loss": 4.655, "step": 73},
    {"epoch": 0.66, "learning_rate": 4.319999999999999e-05, "loss": 4.6118, "step": 74},
    {"epoch": 0.67, "learning_rate": 4.3799999999999994e-05, "loss": 4.5642, "step": 75},
    {"epoch": 0.68, "learning_rate": 4.4399999999999995e-05, "loss": 5.0646, "step": 76},
    {"epoch": 0.69, "learning_rate": 4.4999999999999996e-05, "loss": 5.1318, "step": 77},
    {"epoch": 0.7, "learning_rate": 4.56e-05, "loss": 5.1167, "step": 78},
    {"epoch": 0.71, "learning_rate": 4.62e-05, "loss": 5.0646, "step": 79},
    {"epoch": 0.72, "learning_rate": 4.68e-05, "loss": 4.8794, "step": 80},
    {"epoch": 0.73, "learning_rate": 4.7399999999999993e-05, "loss": 4.6735, "step": 81},
    {"epoch": 0.74, "learning_rate": 4.7999999999999994e-05, "loss": 4.6389, "step": 82},
    {"epoch": 0.74, "learning_rate": 4.8599999999999995e-05, "loss": 4.6545, "step": 83},
    {"epoch": 0.75, "learning_rate": 4.9199999999999997e-05, "loss": 4.6443, "step": 84},
    {"epoch": 0.76, "learning_rate": 4.98e-05, "loss": 4.6087, "step": 85},
    {"epoch": 0.77, "learning_rate": 5.04e-05, "loss": 4.5696, "step": 86},
    {"epoch": 0.78, "learning_rate": 5.1e-05, "loss": 4.5662, "step": 87},
    {"epoch": 0.79, "learning_rate": 5.1599999999999994e-05, "loss": 4.6608, "step": 88},
    {"epoch": 0.8, "learning_rate": 5.2199999999999995e-05, "loss": 4.5441, "step": 89},
    {"epoch": 0.81, "learning_rate": 5.279999999999999e-05, "loss": 4.6029, "step": 90},
    {"epoch": 0.82, "learning_rate": 5.339999999999999e-05, "loss": 4.4904, "step": 91},
    {"epoch": 0.83, "learning_rate": 5.399999999999999e-05, "loss": 4.5875, "step": 92},
    {"epoch": 0.83, "learning_rate": 5.459999999999999e-05, "loss": 4.5483, "step": 93},
    {"epoch": 0.84, "learning_rate": 5.519999999999999e-05, "loss": 4.5813, "step": 94},
    {"epoch": 0.85, "learning_rate": 5.5799999999999994e-05, "loss": 4.5633, "step": 95},
    {"epoch": 0.86, "learning_rate": 5.6399999999999995e-05, "loss": 4.5124, "step": 96},
    {"epoch": 0.87, "learning_rate": 5.6999999999999996e-05, "loss": 4.5139, "step": 97},
    {"epoch": 0.88, "learning_rate": 5.76e-05, "loss": 4.5894, "step": 98},
    {"epoch": 0.89, "learning_rate": 5.82e-05, "loss": 4.5597, "step": 99},
    {"epoch": 0.9, "learning_rate": 5.88e-05, "loss": 4.492, "step": 100},
    {"epoch": 0.91, "learning_rate": 5.94e-05, "loss": 5.1473, "step": 101},
    {"epoch": 0.91, "learning_rate": 5.9999999999999995e-05, "loss": 5.2295, "step": 102},
    {"epoch": 0.92, "learning_rate": 6.0599999999999996e-05, "loss": 5.1966, "step": 103},
    {"epoch": 0.93, "learning_rate": 6.12e-05, "loss": 4.9692, "step": 104},
    {"epoch": 0.94, "learning_rate": 6.18e-05, "loss": 4.8494, "step": 105},
    {"epoch": 0.95, "learning_rate": 6.239999999999999e-05, "loss": 4.6873, "step": 106},
    {"epoch": 0.96, "learning_rate": 6.299999999999999e-05, "loss": 4.639, "step": 107},
    {"epoch": 0.97, "learning_rate": 6.359999999999999e-05, "loss": 4.5857, "step": 108},
    {"epoch": 0.98, "learning_rate": 6.419999999999999e-05, "loss": 4.5445, "step": 109},
    {"epoch": 0.99, "learning_rate": 6.479999999999999e-05, "loss": 4.6775, "step": 110},
    {"epoch": 1.0, "learning_rate": 6.539999999999999e-05, "loss": 4.7204, "step": 111},
    {"epoch": 1.01, "learning_rate": 6.599999999999999e-05, "loss": 6.9572, "step": 112},
    {"epoch": 1.02, "learning_rate": 6.659999999999999e-05, "loss": 4.5871, "step": 113},
    {"epoch": 1.03, "learning_rate": 6.72e-05, "loss": 4.4989, "step": 114},
    {"epoch": 1.04, "learning_rate": 6.78e-05, "loss": 4.4903, "step": 115},
    {"epoch": 1.04, "learning_rate": 6.84e-05, "loss": 4.4851, "step": 116},
    {"epoch": 1.05, "learning_rate": 6.9e-05, "loss": 4.4768, "step": 117},
    {"epoch": 1.06, "learning_rate": 6.96e-05, "loss": 4.4474, "step": 118},
    {"epoch": 1.07, "learning_rate": 7.02e-05, "loss": 4.4565, "step": 119},
    {"epoch": 1.08, "learning_rate": 7.079999999999999e-05, "loss": 4.4612, "step": 120},
    {"epoch": 1.09, "learning_rate": 7.139999999999999e-05, "loss": 4.4651, "step": 121},
    {"epoch": 1.1, "learning_rate": 7.199999999999999e-05, "loss": 4.3432, "step": 122},
    {"epoch": 1.11, "learning_rate": 7.259999999999999e-05, "loss": 4.3871, "step": 123},
    {"epoch": 1.12, "learning_rate": 7.319999999999999e-05, "loss": 4.3813, "step": 124},
    {"epoch": 1.13, "learning_rate": 7.379999999999999e-05, "loss": 4.3116, "step": 125},
    {"epoch": 1.13, "learning_rate": 7.439999999999999e-05, "loss": 4.2469, "step": 126},
    {"epoch": 1.14, "learning_rate": 7.5e-05, "loss": 4.2718, "step": 127},
    {"epoch": 1.15, "learning_rate": 7.56e-05, "loss": 4.3074, "step": 128},
    {"epoch": 1.16, "learning_rate": 7.62e-05, "loss": 4.3431, "step": 129},
    {"epoch": 1.17, "learning_rate": 7.68e-05, "loss": 4.3321, "step": 130},
    {"epoch": 1.18, "learning_rate": 7.74e-05, "loss": 4.2847, "step": 131},
    {"epoch": 1.19, "learning_rate": 7.8e-05, "loss": 4.3293, "step": 132},
    {"epoch": 1.2, "learning_rate": 7.86e-05, "loss": 4.3492, "step": 133},
    {"epoch": 1.21, "learning_rate": 7.92e-05, "loss": 4.2798, "step": 134},
    {"epoch": 1.22, "learning_rate": 7.98e-05, "loss": 4.3546, "step": 135},
    {"epoch": 1.22, "learning_rate": 8.04e-05, "loss": 4.235, "step": 136},
    {"epoch": 1.23, "learning_rate": 8.1e-05, "loss": 5.5645, "step": 137},
    {"epoch": 1.24, "learning_rate": 8.16e-05, "loss": 5.6934, "step": 138},
    {"epoch": 1.25, "learning_rate": 8.22e-05, "loss": 5.5843, "step": 139},
    {"epoch": 1.26, "learning_rate": 8.28e-05, "loss": 5.3033, "step": 140},
    {"epoch": 1.27, "learning_rate": 8.34e-05, "loss": 4.9826, "step": 141},
    {"epoch": 1.28, "learning_rate": 8.4e-05, "loss": 4.7511, "step": 142},
    {"epoch": 1.29, "learning_rate": 8.459999999999998e-05, "loss": 4.5923, "step": 143},
    {"epoch": 1.3, "learning_rate": 8.519999999999998e-05, "loss": 4.4865, "step": 144},
    {"epoch": 1.3, "learning_rate": 8.579999999999998e-05, "loss": 4.4707, "step": 145},
    {"epoch": 1.31, "learning_rate": 8.639999999999999e-05, "loss": 4.4407, "step": 146},
    {"epoch": 1.32, "learning_rate": 8.699999999999999e-05, "loss": 4.4704, "step": 147},
    {"epoch": 1.33, "learning_rate": 8.759999999999999e-05, "loss": 4.3955, "step": 148},
    {"epoch": 1.34, "learning_rate": 8.819999999999999e-05, "loss": 4.4305, "step": 149},
    {"epoch": 1.35, "learning_rate": 8.879999999999999e-05, "loss": 4.4178, "step": 150},
    {"epoch": 1.36, "learning_rate": 8.939999999999999e-05, "loss": 4.3738, "step": 151},
    {"epoch": 1.37, "learning_rate": 8.999999999999999e-05, "loss": 4.345, "step": 152},
    {"epoch": 1.38, "learning_rate": 9.059999999999999e-05, "loss": 4.3189, "step": 153},
    {"epoch": 1.39, "learning_rate": 9.12e-05, "loss": 4.3548, "step": 154},
    {"epoch": 1.39, "learning_rate": 9.18e-05, "loss": 4.3476, "step": 155},
    {"epoch": 1.4, "learning_rate": 9.24e-05, "loss": 4.3373, "step": 156},
    {"epoch": 1.41, "learning_rate": 9.3e-05, "loss": 4.3655, "step": 157},
    {"epoch": 1.42, "learning_rate": 9.36e-05, "loss": 4.3406, "step": 158},
    {"epoch": 1.43, "learning_rate": 9.419999999999999e-05, "loss": 4.2992, "step": 159},
    {"epoch": 1.44, "learning_rate": 9.479999999999999e-05, "loss": 4.3091, "step": 160},
    {"epoch": 1.45, "learning_rate": 9.539999999999999e-05, "loss": 4.2523, "step": 161},
    {"epoch": 1.46, "learning_rate": 9.599999999999999e-05, "loss": 5.3727, "step": 162},
    {"epoch": 1.47, "learning_rate": 9.659999999999999e-05, "loss": 5.5254, "step": 163},
    {"epoch": 1.48, "learning_rate": 9.719999999999999e-05, "loss": 5.3926, "step": 164},
    {"epoch": 1.48, "learning_rate": 9.779999999999999e-05, "loss": 5.1976, "step": 165},
    {"epoch": 1.49, "learning_rate": 9.839999999999999e-05, "loss": 4.84, "step": 166},
    {"epoch": 1.5, "learning_rate": 9.9e-05, "loss": 4.6971, "step": 167},
    {"epoch": 1.51, "learning_rate": 9.96e-05, "loss": 4.6292, "step": 168},
    {"epoch": 1.52, "learning_rate": 0.0001002, "loss": 4.5307, "step": 169},
    {"epoch": 1.53, "learning_rate": 0.0001008, "loss": 4.4417, "step": 170},
    {"epoch": 1.54, "learning_rate": 0.0001014, "loss": 4.471, "step": 171},
    {"epoch": 1.55, "learning_rate": 0.000102, "loss": 4.4859, "step": 172},
    {"epoch": 1.56, "learning_rate": 0.0001026, "loss": 4.4296, "step": 173},
    {"epoch": 1.57, "learning_rate": 0.00010319999999999999, "loss": 4.415, "step": 174},
    {"epoch": 1.57, "learning_rate": 0.00010379999999999999, "loss": 4.346, "step": 175},
    {"epoch": 1.58, "learning_rate": 0.00010439999999999999, "loss": 4.3523, "step": 176},
    {"epoch": 1.59, "learning_rate": 0.00010499999999999999, "loss": 4.2742, "step": 177},
    {"epoch": 1.6, "learning_rate": 0.00010559999999999998, "loss": 4.3161, "step": 178},
    {"epoch": 1.61, "learning_rate": 0.00010619999999999998, "loss": 4.3355, "step": 179},
    {"epoch": 1.62, "learning_rate": 0.00010679999999999998, "loss": 4.2709, "step": 180},
    {"epoch": 1.63, "learning_rate": 0.00010739999999999998, "loss": 4.2814, "step": 181},
    {"epoch": 1.64, "learning_rate": 0.00010799999999999998, "loss": 4.282, "step": 182},
    {"epoch": 1.65, "learning_rate": 0.00010859999999999998, "loss": 4.2875, "step": 183},
    {"epoch": 1.65, "learning_rate": 0.00010919999999999998, "loss": 4.3036, "step": 184},
    {"epoch": 1.66, "learning_rate": 0.00010979999999999999, "loss": 4.2712, "step": 185},
    {"epoch": 1.67, "learning_rate": 0.00011039999999999999, "loss": 4.0503, "step": 186},
    {"epoch": 1.68, "learning_rate": 0.00011099999999999999, "loss": 5.436, "step": 187},
    {"epoch": 1.69, "learning_rate": 0.00011159999999999999, "loss": 5.7089, "step": 188},
    {"epoch": 1.7, "learning_rate": 0.00011219999999999999, "loss": 5.6178, "step": 189},
    {"epoch": 1.71, "learning_rate": 0.00011279999999999999, "loss": 5.3762, "step": 190},
    {"epoch": 1.72, "learning_rate": 0.00011339999999999999, "loss": 4.9978, "step": 191},
    {"epoch": 1.73, "learning_rate": 0.00011399999999999999, "loss": 4.7483, "step": 192},
    {"epoch": 1.74, "learning_rate": 0.0001146, "loss": 4.6654, "step": 193},
    {"epoch": 1.74, "learning_rate": 0.0001152, "loss": 4.585, "step": 194},
    {"epoch": 1.75, "learning_rate": 0.0001158, "loss": 4.4605, "step": 195},
    {"epoch": 1.76, "learning_rate": 0.0001164, "loss": 4.4682, "step": 196},
    {"epoch": 1.77, "learning_rate": 0.000117, "loss": 4.4438, "step": 197},
    {"epoch": 1.78, "learning_rate": 0.0001176, "loss": 4.3513, "step": 198},
    {"epoch": 1.79, "learning_rate": 0.0001182, "loss": 4.3653, "step": 199},
    {"epoch": 1.8, "learning_rate": 0.0001188, "loss": 4.2853, "step": 200},
    {"epoch": 1.81, "learning_rate": 0.0001194, "loss": 4.2774, "step": 201},
    {"epoch": 1.82, "learning_rate": 0.00011999999999999999, "loss": 4.3431, "step": 202},
    {"epoch": 1.83, "learning_rate": 0.00012059999999999999, "loss": 4.2969, "step": 203},
    {"epoch": 1.83, "learning_rate": 0.00012119999999999999, "loss": 4.3317, "step": 204},
    {"epoch": 1.84, "learning_rate": 0.00012179999999999999, "loss": 4.2546, "step": 205},
    {"epoch": 1.85, "learning_rate": 0.0001224, "loss": 4.3031, "step": 206},
    {"epoch": 1.86, "learning_rate": 0.00012299999999999998, "loss": 4.2682, "step": 207},
    {"epoch": 1.87, "learning_rate": 0.0001236, "loss": 4.3388, "step": 208},
    {"epoch": 1.88, "learning_rate": 0.00012419999999999998, "loss": 4.2636, "step": 209},
    {"epoch": 1.89, "learning_rate": 0.00012479999999999997, "loss": 4.1723, "step": 210},
    {"epoch": 1.9, "learning_rate": 0.00012539999999999999, "loss": 4.2299, "step": 211},
    {"epoch": 1.91, "learning_rate": 0.00012599999999999997, "loss": 5.1353, "step": 212},
    {"epoch": 1.91, "learning_rate": 0.0001266, "loss": 5.3656, "step": 213},
    {"epoch": 1.92, "learning_rate": 0.00012719999999999997, "loss": 5.2901, "step": 214},
    {"epoch": 1.93, "learning_rate": 0.0001278, "loss": 4.9679, "step": 215},
    {"epoch": 1.94, "learning_rate": 0.00012839999999999998, "loss": 4.74, "step": 216},
    {"epoch": 1.95, "learning_rate": 0.000129, "loss": 4.6402, "step": 217},
    {"epoch": 1.96, "learning_rate": 0.00012959999999999998, "loss": 4.5565, "step": 218},
    {"epoch": 1.97, "learning_rate": 0.0001302, "loss": 4.3116, "step": 219},
    {"epoch": 1.98, "learning_rate": 0.00013079999999999998, "loss": 4.3372, "step": 220},
    {"epoch": 1.99, "learning_rate": 0.0001314, "loss": 4.4229, "step": 221},
    {"epoch": 2.0, "learning_rate": 0.00013199999999999998, "loss": 4.2879, "step": 222},
    {"epoch": 2.01, "learning_rate": 0.0001326, "loss": 6.599, "step": 223},
    {"epoch": 2.02, "learning_rate": 0.00013319999999999999, "loss": 4.4173, "step": 224},
    {"epoch": 2.03, "learning_rate": 0.0001338, "loss": 4.423, "step": 225},
    {"epoch": 2.04, "learning_rate": 0.0001344, "loss": 4.3792, "step": 226},
    {"epoch": 2.04, "learning_rate": 0.000135, "loss": 4.3279, "step": 227},
    {"epoch": 2.05, "learning_rate": 0.0001356, "loss": 4.2576, "step": 228},
    {"epoch": 2.06, "learning_rate": 0.0001362, "loss": 4.1748, "step": 229},
    {"epoch": 2.07, "learning_rate": 0.0001368, "loss": 4.1601, "step": 230},
    {"epoch": 2.08, "learning_rate": 0.0001374, "loss": 4.1532, "step": 231},
    {"epoch": 2.09, "learning_rate": 0.000138, "loss": 4.08, "step": 232},
    {"epoch": 2.1, "learning_rate": 0.0001386, "loss": 4.089, "step": 233},
    {"epoch": 2.11, "learning_rate": 0.0001392, "loss": 4.0562, "step": 234},
    {"epoch": 2.12, "learning_rate": 0.00013979999999999998, "loss": 4.0354, "step": 235},
    {"epoch": 2.13, "learning_rate": 0.0001404, "loss": 3.9942, "step": 236},
    {"epoch": 2.13, "learning_rate": 0.00014099999999999998, "loss": 3.9382, "step": 237},
    {"epoch": 2.14, "learning_rate": 0.00014159999999999997, "loss": 3.8565, "step": 238},
    {"epoch": 2.15, "learning_rate": 0.0001422, "loss": 3.8866, "step": 239},
    {"epoch": 2.16, "learning_rate": 0.00014279999999999997, "loss": 3.8903, "step": 240},
    {"epoch": 2.17, "learning_rate": 0.0001434, "loss": 3.801, "step": 241},
    {"epoch": 2.18, "learning_rate": 0.00014399999999999998, "loss": 3.8563, "step": 242},
    {"epoch": 2.19, "learning_rate": 0.0001446, "loss": 3.8008, "step": 243},
    {"epoch": 2.2, "learning_rate": 0.00014519999999999998, "loss": 3.7619, "step": 244},
    {"epoch": 2.21, "learning_rate": 0.0001458, "loss": 3.7284, "step": 245},
    {"epoch": 2.22, "learning_rate": 0.00014639999999999998, "loss": 3.6987, "step": 246},
    {"epoch": 2.22, "learning_rate": 0.000147, "loss": 3.5216, "step": 247},
    {"epoch": 2.23, "learning_rate": 0.00014759999999999998, "loss": 5.8039, "step": 248},
    {"epoch": 2.24, "learning_rate": 0.0001482, "loss": 5.738, "step": 249},
    {"epoch": 2.25, "learning_rate": 0.00014879999999999998, "loss": 5.4078, "step": 250},
    {"epoch": 2.26, "learning_rate": 0.0001494, "loss": 5.0056, "step": 251},
    {"epoch": 2.27, "learning_rate": 0.00015, "loss": 4.8029, "step": 252},
    {"epoch": 2.28, "learning_rate": 0.00015059999999999997, "loss": 4.6233, "step": 253},
    {"epoch": 2.29, "learning_rate": 0.0001512, "loss": 4.4243, "step": 254},
    {"epoch": 2.3, "learning_rate": 0.00015179999999999998, "loss": 4.2563, "step": 255},
    {"epoch": 2.3, "learning_rate": 0.0001524, "loss": 4.2108, "step": 256},
    {"epoch": 2.31, "learning_rate": 0.00015299999999999998, "loss": 4.1417, "step": 257},
    {"epoch": 2.32, "learning_rate": 0.0001536, "loss": 4.2294, "step": 258},
    {"epoch": 2.33, "learning_rate": 0.00015419999999999998, "loss": 4.1063, "step": 259},
    {"epoch": 2.34, "learning_rate": 0.0001548, "loss": 4.0417, "step": 260},
    {"epoch": 2.35, "learning_rate": 0.00015539999999999998, "loss": 3.9855, "step": 261},
    {"epoch": 2.36, "learning_rate": 0.000156, "loss": 3.9049, "step": 262},
    {"epoch": 2.37, "learning_rate": 0.00015659999999999998, "loss": 3.9212, "step": 263},
    {"epoch": 2.38, "learning_rate": 0.0001572, "loss": 3.8797, "step": 264},
    {"epoch": 2.39, "learning_rate": 0.0001578, "loss": 3.9043, "step": 265},
    {"epoch": 2.39, "learning_rate": 0.0001584, "loss": 3.8784, "step": 266},
    {"epoch": 2.4, "learning_rate": 0.000159, "loss": 3.8376, "step": 267},
    {"epoch": 2.41, "learning_rate": 0.0001596, "loss": 3.916, "step": 268},
    {"epoch": 2.42, "learning_rate": 0.0001602, "loss": 3.9107, "step": 269},
    {"epoch": 2.43, "learning_rate": 0.0001608, "loss": 3.7992, "step": 270},
    {"epoch": 2.44, "learning_rate": 0.0001614, "loss": 3.6413, "step": 271},
    {"epoch": 2.45, "learning_rate": 0.000162, "loss": 3.5566, "step": 272},
    {"epoch": 2.46, "learning_rate": 0.0001626, "loss": 5.6041, "step": 273},
    {"epoch": 2.47, "learning_rate": 0.0001632, "loss": 5.8567, "step": 274},
    {"epoch": 2.48, "learning_rate": 0.0001638, "loss": 5.6862, "step": 275},
    {"epoch": 2.48, "learning_rate": 0.0001644, "loss": 5.4036, "step": 276},
    {"epoch": 2.49, "learning_rate": 0.000165, "loss": 4.9913, "step": 277},
    {"epoch": 2.5, "learning_rate": 0.0001656, "loss": 4.7303, "step": 278},
    {"epoch": 2.51, "learning_rate": 0.0001662, "loss": 4.5465, "step": 279},
    {"epoch": 2.52, "learning_rate": 0.0001668, "loss": 4.33, "step": 280},
    {"epoch": 2.53, "learning_rate": 0.0001674, "loss": 4.2422, "step": 281},
    {"epoch": 2.54, "learning_rate": 0.000168, "loss": 4.2522, "step": 282},
    {"epoch": 2.55, "learning_rate": 0.0001686, "loss": 4.1577, "step": 283},
    {"epoch": 2.56, "learning_rate": 0.00016919999999999997, "loss": 4.1028, "step": 284},
    {"epoch": 2.57, "learning_rate": 0.00016979999999999998, "loss": 4.1082, "step": 285},
    {"epoch": 2.57, "learning_rate": 0.00017039999999999997, "loss": 4.0649, "step": 286},
    {"epoch": 2.58, "learning_rate": 0.00017099999999999998, "loss": 4.0507, "step": 287},
    {"epoch": 2.59, "learning_rate": 0.00017159999999999997, "loss": 3.921, "step": 288},
    {"epoch": 2.6, "learning_rate": 0.00017219999999999998, "loss": 3.9083, "step": 289},
    {"epoch": 2.61, "learning_rate": 0.00017279999999999997, "loss": 3.8807, "step": 290},
    {"epoch": 2.62, "learning_rate": 0.00017339999999999996, "loss": 3.9007, "step": 291},
    {"epoch": 2.63, "learning_rate": 0.00017399999999999997, "loss": 3.8992, "step": 292},
    {"epoch": 2.64, "learning_rate": 0.00017459999999999996, "loss": 3.9531, "step": 293},
    {"epoch": 2.65, "learning_rate": 0.00017519999999999998, "loss": 3.8481, "step": 294},
    {"epoch": 2.65, "learning_rate": 0.00017579999999999996, "loss": 3.7191, "step": 295},
    {"epoch": 2.66, "learning_rate": 0.00017639999999999998, "loss": 3.5684, "step": 296},
    {"epoch": 2.67, "learning_rate": 0.00017699999999999997, "loss": 3.4456, "step": 297},
    {"epoch": 2.68, "learning_rate": 0.00017759999999999998, "loss": 5.4417, "step": 298},
    {"epoch": 2.69, "learning_rate": 0.00017819999999999997, "loss": 5.4952, "step": 299},
    {"epoch": 2.7, "learning_rate": 0.00017879999999999998, "loss": 5.2771, "step": 300},
    {"epoch": 2.71, "learning_rate": 0.00017939999999999997, "loss": 4.9689, "step": 301},
    {"epoch": 2.72, "learning_rate": 0.00017999999999999998, "loss": 4.709, "step": 302},
    {"epoch": 2.73, "learning_rate": 0.00018059999999999997, "loss": 4.507, "step": 303},
    {"epoch": 2.74, "learning_rate": 0.00018119999999999999, "loss": 4.3383, "step": 304},
    {"epoch": 2.74, "learning_rate": 0.00018179999999999997, "loss": 4.2212, "step": 305},
    {"epoch": 2.75, "learning_rate": 0.0001824, "loss": 4.2254, "step": 306},
    {"epoch": 2.76, "learning_rate": 0.00018299999999999998, "loss": 4.2006, "step": 307},
    {"epoch": 2.77, "learning_rate": 0.0001836, "loss": 4.0609, "step": 308},
    {"epoch": 2.78, "learning_rate": 0.00018419999999999998, "loss": 4.0235, "step": 309},
    {"epoch": 2.79, "learning_rate": 0.0001848, "loss": 4.0431, "step": 310},
    {"epoch": 2.8, "learning_rate": 0.00018539999999999998, "loss": 3.9653, "step": 311},
    {"epoch": 2.81, "learning_rate": 0.000186, "loss": 3.9405, "step": 312},
    {"epoch": 2.82, "learning_rate": 0.00018659999999999998, "loss": 3.8778, "step": 313},
    {"epoch": 2.83, "learning_rate": 0.0001872, "loss": 3.8905, "step": 314},
    {"epoch": 2.83, "learning_rate": 0.00018779999999999998, "loss": 3.8976, "step": 315},
    {"epoch": 2.84, "learning_rate": 0.00018839999999999997, "loss": 3.8547, "step": 316},
    {"epoch": 2.85, "learning_rate": 0.00018899999999999999, "loss": 3.8121, "step": 317},
    {"epoch": 2.86, "learning_rate": 0.00018959999999999997, "loss": 3.8189, "step": 318},
    {"epoch": 2.87, "learning_rate": 0.0001902, "loss": 3.7694, "step": 319},
    {"epoch": 2.88, "learning_rate": 0.00019079999999999998, "loss": 3.6308, "step": 320},
    {"epoch": 2.89, "learning_rate": 0.0001914, "loss": 3.5794, "step": 321},
    {"epoch": 2.9, "learning_rate": 0.00019199999999999998, "loss": 3.3702, "step": 322},
    {"epoch": 2.91, "learning_rate": 0.0001926, "loss": 5.1264, "step": 323},
    {"epoch": 2.91, "learning_rate": 0.00019319999999999998, "loss": 4.9258, "step": 324},
    {"epoch": 2.92, "learning_rate": 0.0001938, "loss": 4.3596, "step": 325},
    {"epoch": 2.93, "learning_rate": 0.00019439999999999998, "loss": 4.2863, "step": 326},
    {"epoch": 2.94, "learning_rate": 0.000195, "loss": 4.1943, "step": 327},
    {"epoch": 2.95, "learning_rate": 0.00019559999999999998, "loss": 4.2371, "step": 328},
    {"epoch": 2.96, "learning_rate": 0.0001962, "loss": 4.0108, "step": 329},
    {"epoch": 2.97, "learning_rate": 0.00019679999999999999, "loss": 3.9665, "step": 330},
    {"epoch": 2.98, "learning_rate": 0.0001974, "loss": 3.9371, "step": 331},
    {"epoch": 2.99, "learning_rate": 0.000198, "loss": 3.8094, "step": 332},
    {"epoch": 3.0, "learning_rate": 0.0001986, "loss": 3.6996, "step": 333},
    {"epoch": 3.01, "learning_rate": 0.0001992, "loss": 6.1462, "step": 334},
    {"epoch": 3.02, "learning_rate": 0.0001998, "loss": 4.3462, "step": 335},
    {"epoch": 3.03, "learning_rate": 0.0002004, "loss": 4.1581, "step": 336},
    {"epoch": 3.04, "learning_rate": 0.000201, "loss": 4.1832, "step": 337},
    {"epoch": 3.04, "learning_rate": 0.0002016, "loss": 4.1241, "step": 338},
    {"epoch": 3.05, "learning_rate": 0.0002022, "loss": 3.9371, "step": 339},
    {"epoch": 3.06, "learning_rate": 0.0002028, "loss": 3.8334, "step": 340},
    {"epoch": 3.07, "learning_rate": 0.00020339999999999998, "loss": 3.7693, "step": 341},
    {"epoch": 3.08, "learning_rate": 0.000204, "loss": 3.7465, "step": 342},
    {"epoch": 3.09, "learning_rate": 0.00020459999999999999, "loss": 3.6217, "step": 343},
    {"epoch": 3.1, "learning_rate": 0.0002052, "loss": 3.6467, "step": 344},
    {"epoch": 3.11, "learning_rate": 0.0002058, "loss": 3.5401, "step": 345},
    {"epoch": 3.12, "learning_rate": 0.00020639999999999998, "loss": 3.5786, "step": 346},
    {"epoch": 3.13, "learning_rate": 0.00020699999999999996, "loss": 3.4404, "step": 347},
    {"epoch": 3.13, "learning_rate": 0.00020759999999999998, "loss": 3.3864, "step": 348},
    {"epoch": 3.14, "learning_rate": 0.00020819999999999996, "loss": 3.2147, "step": 349},
    {"epoch": 3.15, "learning_rate": 0.00020879999999999998, "loss": 3.3568, "step": 350},
    {"epoch": 3.16, "learning_rate": 0.00020939999999999997, "loss": 3.34, "step": 351},
    {"epoch": 3.17, "learning_rate": 0.00020999999999999998, "loss": 3.2556, "step": 352},
    {"epoch": 3.18, "learning_rate": 0.00021059999999999997, "loss": 3.2095, "step": 353},
    {"epoch": 3.19, "learning_rate": 0.00021119999999999996, "loss": 3.237, "step": 354},
    {"epoch": 3.2, "learning_rate": 0.00021179999999999997, "loss": 3.0777, "step": 355},
    {"epoch": 3.21, "learning_rate": 0.00021239999999999996, "loss": 2.9569, "step": 356},
    {"epoch": 3.22, "learning_rate": 0.00021299999999999997, "loss": 2.5873, "step": 357},
    {"epoch": 3.22, "learning_rate": 0.00021359999999999996, "loss": 2.4589, "step": 358},
    {"epoch": 3.23, "learning_rate": 0.00021419999999999998, "loss": 5.7497, "step": 359},
    {"epoch": 3.24, "learning_rate": 0.00021479999999999996, "loss": 5.7524, "step": 360},
    {"epoch": 3.25, "learning_rate": 0.00021539999999999998, "loss": 5.2796, "step": 361},
    {"epoch": 3.26, "learning_rate": 0.00021599999999999996, "loss": 4.8365, "step": 362},
    {"epoch": 3.27, "learning_rate": 0.00021659999999999998, "loss": 4.4809, "step": 363},
    {"epoch": 3.28, "learning_rate": 0.00021719999999999997, "loss": 4.1632, "step": 364},
    {"epoch": 3.29, "learning_rate": 0.00021779999999999998, "loss": 3.9612, "step": 365},
    {"epoch": 3.3, "learning_rate": 0.00021839999999999997, "loss": 3.9396, "step": 366},
    {"epoch": 3.3, "learning_rate": 0.00021899999999999998, "loss": 3.8539, "step": 367},
    {"epoch": 3.31, "learning_rate": 0.00021959999999999997, "loss": 3.7321, "step": 368},
    {"epoch": 3.32, "learning_rate": 0.00022019999999999999, "loss": 3.6681, "step": 369},
    {"epoch": 3.33, "learning_rate": 0.00022079999999999997, "loss": 3.6725, "step": 370},
    {"epoch": 3.34, "learning_rate": 0.0002214, "loss": 3.5996, "step": 371},
    {"epoch": 3.35, "learning_rate": 0.00022199999999999998, "loss": 3.502, "step": 372},
    {"epoch": 3.36, "learning_rate": 0.0002226, "loss": 3.4053, "step": 373},
    {"epoch": 3.37, "learning_rate": 0.00022319999999999998, "loss": 3.3827, "step": 374},
    {"epoch": 3.38, "learning_rate": 0.0002238, "loss": 3.3173, "step": 375},
    {"epoch": 3.39, "learning_rate": 0.00022439999999999998, "loss": 3.315, "step": 376},
    {"epoch": 3.39, "learning_rate": 0.000225, "loss": 3.3281, "step": 377},
    {"epoch": 3.4, "learning_rate": 0.00022559999999999998, "loss": 3.27, "step": 378},
    {"epoch": 3.41, "learning_rate": 0.00022619999999999997, "loss": 3.2502, "step": 379},
    {"epoch": 3.42, "learning_rate": 0.00022679999999999998, "loss": 3.1024, "step": 380},
    {"epoch": 3.43, "learning_rate": 0.00022739999999999997, "loss": 2.9408, "step": 381},
    {"epoch": 3.44, "learning_rate": 0.00022799999999999999, "loss": 2.7313, "step": 382},
    {"epoch": 3.45, "learning_rate": 0.00022859999999999997, "loss": 2.4126, "step": 383},
    {"epoch": 3.46, "learning_rate": 0.0002292, "loss": 5.2794, "step": 384},
    {"epoch": 3.47, "learning_rate": 0.00022979999999999997, "loss": 5.2546, "step": 385},
    {"epoch": 3.48, "learning_rate": 0.0002304, "loss": 4.9179, "step": 386},
    {"epoch": 3.48, "learning_rate": 0.00023099999999999998, "loss": 4.6224, "step": 387},
    {"epoch": 3.49, "learning_rate": 0.0002316, "loss": 4.3587, "step": 388},
    {"epoch": 3.5, "learning_rate": 0.00023219999999999998, "loss": 4.0705, "step": 389},
    {"epoch": 3.51, "learning_rate": 0.0002328, "loss": 3.9197, "step": 390},
    {"epoch": 3.52, "learning_rate": 0.00023339999999999998, "loss": 3.8546, "step": 391},
    {"epoch": 3.53, "learning_rate": 0.000234, "loss": 3.7795, "step": 392},
    {"epoch": 3.54, "learning_rate": 0.00023459999999999998, "loss": 3.7303, "step": 393},
    {"epoch": 3.55, "learning_rate": 0.0002352, "loss": 3.7024, "step": 394},
    {"epoch": 3.56, "learning_rate": 0.00023579999999999999, "loss": 3.5815, "step": 395},
    {"epoch": 3.57, "learning_rate": 0.0002364, "loss": 3.5363, "step": 396},
    {"epoch": 3.57, "learning_rate": 0.000237, "loss": 3.494, "step": 397},
    {"epoch": 3.58, "learning_rate": 0.0002376, "loss": 3.3856, "step": 398},
    {"epoch": 3.59, "learning_rate": 0.0002382, "loss": 3.3395, "step": 399},
    {"epoch": 3.6, "learning_rate": 0.0002388, "loss": 3.3198, "step": 400},
    {"epoch": 3.61, "learning_rate": 0.0002394, "loss": 3.311, "step": 401},
    {"epoch": 3.62, "learning_rate": 0.00023999999999999998, "loss": 3.2043, "step": 402},
    {"epoch": 3.63, "learning_rate": 0.0002406, "loss": 3.2077, "step": 403},
    {"epoch": 3.64, "learning_rate": 0.00024119999999999998, "loss": 3.2145, "step": 404},
    {"epoch": 3.65, "learning_rate": 0.0002418, "loss": 3.0301, "step": 405},
    {"epoch": 3.65, "learning_rate": 0.00024239999999999998, "loss": 2.8861, "step": 406},
    {"epoch": 3.66, "learning_rate": 0.000243, "loss": 2.633, "step": 407},
    {"epoch": 3.67, "learning_rate": 0.00024359999999999999, "loss": 2.191, "step": 408},
    {"epoch": 3.68, "learning_rate": 0.00024419999999999997, "loss": 5.444, "step": 409},
    {"epoch": 3.69, "learning_rate": 0.0002448, "loss": 5.6085, "step": 410},
    {"epoch": 3.7, "learning_rate": 0.00024539999999999995, "loss": 5.2909, "step": 411},
    {"epoch": 3.71, "learning_rate": 0.00024599999999999996, "loss": 4.7823, "step": 412},
    {"epoch": 3.72, "learning_rate": 0.0002466, "loss": 4.4985, "step": 413},
    {"epoch": 3.73, "learning_rate": 0.0002472, "loss": 4.2019, "step": 414},
    {"epoch": 3.74, "learning_rate": 0.00024779999999999995, "loss": 4.024, "step": 415},
    {"epoch": 3.74, "learning_rate": 0.00024839999999999997, "loss": 3.8789, "step": 416},
    {"epoch": 3.75, "learning_rate": 0.000249, "loss": 3.7719, "step": 417},
    {"epoch": 3.76, "learning_rate": 0.00024959999999999994, "loss": 3.773, "step": 418},
    {"epoch": 3.77, "learning_rate": 0.00025019999999999996, "loss": 3.5885, "step": 419},
    {"epoch": 3.78, "learning_rate": 0.00025079999999999997, "loss": 3.5845, "step": 420},
    {"epoch": 3.79, "learning_rate": 0.0002514, "loss": 3.523, "step": 421},
    {"epoch": 3.8, "learning_rate": 0.00025199999999999995, "loss": 3.4934, "step": 422},
    {"epoch": 3.81, "learning_rate": 0.00025259999999999996, "loss": 3.4074, "step": 423},
    {"epoch": 3.82, "learning_rate": 0.0002532, "loss": 3.3231, "step": 424},
    {"epoch": 3.83, "learning_rate": 0.0002538, "loss": 3.3202, "step": 425},
    {"epoch": 3.83, "learning_rate": 0.00025439999999999995, "loss": 3.2468, "step": 426},
    {"epoch": 3.84, "learning_rate": 0.00025499999999999996, "loss": 3.2139, "step": 427},
    {"epoch": 3.85, "learning_rate": 0.0002556, "loss": 3.1771, "step": 428},
    {"epoch": 3.86, "learning_rate": 0.0002562, "loss": 3.056, "step": 429},
    {"epoch": 3.87, "learning_rate": 0.00025679999999999995, "loss": 2.9464, "step": 430},
    {"epoch": 3.88, "learning_rate": 0.00025739999999999997, "loss": 2.7896, "step": 431},
    {"epoch": 3.89, "learning_rate": 0.000258, "loss": 2.5634, "step": 432},
    {"epoch": 3.9, "learning_rate": 0.0002586, "loss": 2.1404, "step": 433},
    {"epoch": 3.91, "learning_rate": 0.00025919999999999996, "loss": 5.0964, "step": 434},
    {"epoch": 3.91, "learning_rate": 0.00025979999999999997, "loss": 5.1443, "step": 435},
    {"epoch": 3.92, "learning_rate": 0.0002604, "loss": 4.5983, "step": 436},
    {"epoch": 3.93, "learning_rate": 0.000261, "loss": 4.1566, "step": 437},
    {"epoch": 3.94, "learning_rate": 0.00026159999999999996, "loss": 3.7846, "step": 438},
    {"epoch": 3.95, "learning_rate": 0.0002622, "loss": 3.6671, "step": 439},
    {"epoch": 3.96, "learning_rate": 0.0002628, "loss": 3.4806, "step": 440},
    {"epoch": 3.97, "learning_rate": 0.00026339999999999995, "loss": 3.5046, "step": 441},
    {"epoch": 3.98, "learning_rate": 0.00026399999999999997, "loss": 3.3366, "step": 442},
    {"epoch": 3.99, "learning_rate": 0.0002646, "loss": 3.1014, "step": 443},
    {"epoch": 4.0, "learning_rate": 0.0002652, "loss": 2.7035, "step": 444},
    {"epoch": 4.01, "learning_rate": 0.00026579999999999996, "loss": 5.0943, "step": 445},
    {"epoch": 4.02, "learning_rate": 0.00026639999999999997, "loss": 3.9256, "step": 446},
    {"epoch": 4.03, "learning_rate": 0.000267, "loss": 3.7867, "step": 447},
    {"epoch": 4.04, "learning_rate": 0.0002676, "loss": 3.6839, "step": 448},
    {"epoch": 4.04, "learning_rate": 0.00026819999999999996, "loss": 3.6042, "step": 449},
    {"epoch": 4.05, "learning_rate": 0.0002688, "loss": 3.3529, "step": 450},
    {"epoch": 4.06, "learning_rate": 0.0002694, "loss": 3.3266, "step": 451},
    {"epoch": 4.07, "learning_rate": 0.00027, "loss": 3.2218, "step": 452},
    {"epoch": 4.08, "learning_rate": 0.00027059999999999996, "loss": 3.1004, "step": 453},
    {"epoch": 4.09, "learning_rate": 0.0002712, "loss": 3.0669, "step": 454},
    {"epoch": 4.1, "learning_rate": 0.0002718, "loss": 2.8998, "step": 455},
    {"epoch": 4.11, "learning_rate": 0.0002724, "loss": 2.8966, "step": 456},
    {"epoch": 4.12, "learning_rate": 0.00027299999999999997, "loss": 2.8114, "step": 457},
    {"epoch": 4.13, "learning_rate": 0.0002736, "loss": 2.6145, "step": 458},
    {"epoch": 4.13, "learning_rate": 0.0002742, "loss": 2.5475, "step": 459},
    {"epoch": 4.14, "learning_rate": 0.0002742, "loss": 2.5106, "step": 460},
    {"epoch": 4.15, "learning_rate": 0.0002748, "loss": 2.4872, "step": 461},
    {"epoch": 4.16, "learning_rate": 0.00027539999999999997, "loss": 2.3962, "step": 462},
    {"epoch": 4.17, "learning_rate": 0.000276, "loss": 2.2508, "step": 463},
    {"epoch": 4.18, "learning_rate": 0.0002766, "loss": 2.201, "step": 464},
    {"epoch": 4.19, "learning_rate": 0.0002772, "loss": 2.1194, "step": 465},
    {"epoch": 4.2, "learning_rate": 0.0002778, "loss": 1.8332, "step": 466},
    {"epoch": 4.21, "learning_rate": 0.0002784, "loss": 1.6656, "step": 467},
    {"epoch": 4.22, "learning_rate": 0.000279, "loss": 1.2958, "step": 468},
    {"epoch": 4.22, "learning_rate": 0.00027959999999999997, "loss": 1.0378, "step": 469},
    {"epoch": 4.23, "learning_rate": 0.0002802, "loss": 5.2736, "step": 470},
    {"epoch": 4.24, "learning_rate": 0.0002808, "loss": 4.6758, "step": 471},
    {"epoch": 4.25, "learning_rate": 0.00028139999999999996, "loss": 4.0751, "step": 472},
    {"epoch": 4.26, "learning_rate": 0.00028199999999999997, "loss": 3.7796, "step": 473},
    {"epoch": 4.27, "learning_rate": 0.0002826, "loss": 3.4917, "step": 474},
    {"epoch": 4.28, "learning_rate": 0.00028319999999999994, "loss": 3.2089, "step": 475},
    {"epoch": 4.29, "learning_rate": 0.00028379999999999996, "loss": 3.0105, "step": 476},
    {"epoch": 4.3, "learning_rate": 0.0002844, "loss": 2.9742, "step": 477},
    {"epoch": 4.3, "learning_rate": 0.000285, "loss": 2.8292, "step": 478},
    {"epoch": 4.31, "learning_rate": 0.00028559999999999995, "loss": 2.7116, "step": 479},
    {"epoch": 4.32, "learning_rate": 0.00028619999999999996, "loss": 2.5631, "step": 480},
    {"epoch": 4.33, "learning_rate": 0.0002868, "loss": 2.5057, "step": 481},
    {"epoch": 4.34, "learning_rate": 0.00028739999999999994, "loss": 2.4719, "step": 482},
    {"epoch": 4.35, "learning_rate": 0.00028799999999999995, "loss": 2.2309, "step": 483},
    {"epoch": 4.36, "learning_rate": 0.00028859999999999997, "loss": 2.2296, "step": 484},
    {"epoch": 4.37, "learning_rate": 0.0002892, "loss": 1.9529, "step": 485},
    {"epoch": 4.38, "learning_rate": 0.00028979999999999994, "loss": 2.0596, "step": 486},
    {"epoch": 4.39, "learning_rate": 0.00029039999999999996, "loss": 1.9794, "step": 487},
    {"epoch": 4.39, "learning_rate": 0.00029099999999999997, "loss": 1.7684, "step": 488},
    {"epoch": 4.4, "learning_rate": 0.0002916, "loss": 1.5721, "step": 489},
    {"epoch": 4.41, "learning_rate": 0.00029219999999999995, "loss": 1.6114, "step": 490},
    {"epoch": 4.42, "learning_rate": 0.00029279999999999996, "loss": 1.4997, "step": 491},
    {"epoch": 4.43, "learning_rate": 0.0002934, "loss": 1.2318, "step": 492},
    {"epoch": 4.44, "learning_rate": 0.000294, "loss": 0.9552, "step": 493},
    {"epoch": 4.45, "learning_rate": 0.00029459999999999995, "loss": 0.8515, "step": 494},
    {"epoch": 4.46, "learning_rate": 0.00029519999999999997, "loss": 6.105, "step": 495},
    {"epoch": 4.47, "learning_rate": 0.0002958, "loss": 4.909, "step": 496},
    {"epoch": 4.48, "learning_rate": 0.0002964, "loss": 3.761, "step": 497},
    {"epoch": 4.48, "learning_rate": 0.00029699999999999996, "loss": 3.2873, "step": 498},
    {"epoch": 4.49, "learning_rate": 0.00029759999999999997, "loss": 2.961, "step": 499},
    {"epoch": 4.5, "learning_rate": 0.0002982, "loss": 2.7605, "step": 500},
    {"epoch": 4.5, "eval_loss": 2.6299471855163574, "eval_runtime": 336.2822, "eval_samples_per_second": 7.856, "eval_steps_per_second": 0.494, "eval_wer": 1.4451408171360571, "step": 500},
    {"epoch": 4.51, "learning_rate": 0.0002988, "loss": 2.3839, "step": 501},
    {"epoch": 4.52, "learning_rate": 0.00029939999999999996, "loss": 2.2854, "step": 502},
    {"epoch": 4.53, "learning_rate": 0.0003, "loss": 2.1582, "step": 503},
    {"epoch": 4.54, "learning_rate": 0.0002995081967213115, "loss": 1.9157, "step": 504},
    {"epoch": 4.55, "learning_rate": 0.0002990163934426229, "loss": 1.851, "step": 505},
    {"epoch": 4.56, "learning_rate": 0.00029852459016393437, "loss": 1.8247, "step": 506},
    {"epoch": 4.57, "learning_rate": 0.00029803278688524587, "loss": 1.7489, "step": 507},
    {"epoch": 4.57, "learning_rate": 0.00029754098360655737, "loss": 1.6941, "step": 508},
    {"epoch": 4.58, "learning_rate": 0.0002970491803278688, "loss": 1.513, "step": 509},
    {"epoch": 4.59, "learning_rate": 0.0002965573770491803, "loss": 1.401, "step": 510},
    {"epoch": 4.6, "learning_rate": 0.0002960655737704918, "loss": 1.3516, "step": 511},
    {"epoch": 4.61, "learning_rate": 0.00029557377049180326, "loss": 1.27, "step": 512},
    {"epoch": 4.62, "learning_rate": 0.0002950819672131147, "loss": 1.2286, "step": 513},
    {"epoch": 4.63, "learning_rate": 0.0002945901639344262, "loss": 1.1139, "step": 514},
    {"epoch": 4.64, "learning_rate": 0.0002940983606557377, "loss": 1.1178, "step": 515},
    {"epoch": 4.65, "learning_rate": 0.00029360655737704916, "loss": 1.0057, "step": 516},
    {"epoch": 4.65, "learning_rate": 0.0002931147540983606, "loss": 0.8247, "step": 517},
    {"epoch": 4.66, "learning_rate": 0.0002926229508196721, "loss": 0.7677, "step": 518},
    {"epoch": 4.67, "learning_rate": 0.00029213114754098355, "loss": 0.8346, "step": 519},
    {"epoch": 4.68, "learning_rate": 0.00029163934426229505, "loss": 5.4198, "step": 520},
    {"epoch": 4.69, "learning_rate": 0.00029114754098360655, "loss": 4.7027, "step": 521},
    {"epoch": 4.7, "learning_rate": 0.000290655737704918, "loss": 3.6879, "step": 522},
    {"epoch": 4.71, "learning_rate": 0.00029016393442622945, "loss": 2.9378, "step": 523},
    {"epoch": 4.72, "learning_rate": 0.00028967213114754095, "loss": 2.511, "step": 524},
    {"epoch": 4.73, "learning_rate": 0.00028918032786885245, "loss": 2.1899, "step": 525},
    {"epoch": 4.74, "learning_rate": 0.0002886885245901639, "loss": 1.9893, "step": 526},
    {"epoch": 4.74, "learning_rate": 0.0002881967213114754, "loss": 1.7267, "step": 527},
    {"epoch": 4.75, "learning_rate": 0.00028770491803278684, "loss": 1.5322, "step": 528},
    {"epoch": 4.76, "learning_rate": 0.00028721311475409834, "loss": 1.4467, "step": 529},
    {"epoch": 4.77, "learning_rate": 0.0002867213114754098, "loss": 1.3211, "step": 530},
    {"epoch": 4.78, "learning_rate": 0.0002862295081967213, "loss": 1.1658, "step": 531},
    {"epoch": 4.79, "learning_rate": 0.0002857377049180328, "loss": 1.1107, "step": 532},
    {"epoch": 4.8, "learning_rate": 0.00028524590163934424, "loss": 1.0426, "step": 533},
    {"epoch": 4.81, "learning_rate": 0.0002847540983606557, "loss": 1.1146, "step": 534},
    {"epoch": 4.82, "learning_rate": 0.0002842622950819672, "loss": 0.9751, "step": 535},
    {"epoch": 4.83, "learning_rate": 0.0002837704918032787, "loss": 0.9805, "step": 536},
    {"epoch": 4.83, "learning_rate": 0.00028327868852459013, "loss": 0.9604, "step": 537},
    {"epoch": 4.84, "learning_rate": 0.00028278688524590163, "loss": 0.825, "step": 538},
    {"epoch": 4.85, "learning_rate": 0.0002822950819672131, "loss": 0.8417, "step": 539},
    {"epoch": 4.86, "learning_rate": 0.0002818032786885246, "loss": 0.851, "step": 540},
    {"epoch": 4.87, "learning_rate": 0.00028131147540983603, "loss": 0.6944, "step": 541},
    {"epoch": 4.88, "learning_rate": 0.00028081967213114753, "loss": 0.6604, "step": 542},
    {"epoch": 4.89, "learning_rate": 0.00028032786885245903, "loss": 0.5602, "step": 543},
    {"epoch": 4.9, "learning_rate": 0.0002798360655737705, "loss": 0.577, "step": 544},
    {"epoch": 4.91, "learning_rate": 0.0002793442622950819, "loss": 5.3539, "step": 545},
    {"epoch": 4.91, "learning_rate": 0.0002788524590163934, "loss": 3.4873, "step": 546},
    {"epoch": 4.92, "learning_rate": 0.00027836065573770487, "loss": 1.9983, "step": 547},
    {"epoch": 4.93, "learning_rate": 0.00027786885245901637, "loss": 1.6671, "step": 548},
    {"epoch": 4.94, "learning_rate": 0.00027737704918032787, "loss": 1.535, "step": 549},
    {"epoch": 4.95, "learning_rate": 0.0002768852459016393, "loss": 1.2795, "step": 550},
    {"epoch": 4.96, "learning_rate": 0.00027639344262295076, "loss": 1.1159, "step": 551},
    {"epoch": 4.97, "learning_rate": 0.00027590163934426227, "loss": 0.9898, "step": 552},
    {"epoch": 4.98, "learning_rate": 0.00027540983606557377, "loss": 0.8807, "step": 553},
    {"epoch": 4.99, "learning_rate": 0.0002749180327868852, "loss": 0.8109, "step": 554},
    {"epoch": 5.0, "learning_rate": 0.00027442622950819666, "loss": 0.6494, "step": 555},
    {"epoch": 5.01, "learning_rate": 0.00027393442622950816, "loss": 1.9571, "step": 556},
    {"epoch": 5.02, "learning_rate": 0.00027344262295081966, "loss": 1.5384, "step": 557},
    {"epoch": 5.03, "learning_rate": 0.0002729508196721311, "loss": 1.2256, "step": 558},
    {"epoch": 5.04, "learning_rate": 0.0002724590163934426, "loss": 1.0547, "step": 559},
    {"epoch": 5.04, "learning_rate": 0.0002719672131147541, "loss": 0.9737, "step": 560},
    {"epoch": 5.05, "learning_rate": 0.00027147540983606556, "loss": 0.8202, "step": 561},
    {"epoch": 5.06, "learning_rate": 0.000270983606557377, "loss": 0.7816, "step": 562},
    {"epoch": 5.07, "learning_rate": 0.0002704918032786885,
|
"loss": 0.7362, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 0.00027, |
|
"loss": 0.6287, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 0.00026950819672131145, |
|
"loss": 0.5932, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 0.0002690163934426229, |
|
"loss": 0.5249, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 0.0002685245901639344, |
|
"loss": 0.5345, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 0.0002680327868852459, |
|
"loss": 0.5351, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 0.00026754098360655734, |
|
"loss": 0.4818, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 0.00026704918032786885, |
|
"loss": 0.4507, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.00026655737704918035, |
|
"loss": 0.4339, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 0.0002660655737704918, |
|
"loss": 0.4029, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 0.00026557377049180324, |
|
"loss": 0.4651, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 0.00026508196721311474, |
|
"loss": 0.3721, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 0.0002645901639344262, |
|
"loss": 0.3766, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 0.0002640983606557377, |
|
"loss": 0.3318, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 0.00026360655737704913, |
|
"loss": 0.331, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 0.00026311475409836063, |
|
"loss": 0.2748, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.0002626229508196721, |
|
"loss": 0.2241, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.0002621311475409836, |
|
"loss": 0.3001, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 0.0002616393442622951, |
|
"loss": 3.1772, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 0.00026114754098360653, |
|
"loss": 2.0168, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 0.000260655737704918, |
|
"loss": 1.2479, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 0.0002601639344262295, |
|
"loss": 1.2017, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 0.000259672131147541, |
|
"loss": 1.1079, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 0.0002591803278688524, |
|
"loss": 0.8699, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 0.0002586885245901639, |
|
"loss": 0.7881, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 0.00025819672131147537, |
|
"loss": 0.7279, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 0.00025770491803278687, |
|
"loss": 0.6572, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 0.0002572131147540983, |
|
"loss": 0.6395, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 0.0002567213114754098, |
|
"loss": 0.5414, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 0.0002562295081967213, |
|
"loss": 0.5121, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 0.00025573770491803277, |
|
"loss": 0.4897, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 0.0002552459016393442, |
|
"loss": 0.4312, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 0.0002547540983606557, |
|
"loss": 0.4397, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 0.0002542622950819672, |
|
"loss": 0.3896, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 0.00025377049180327866, |
|
"loss": 0.3908, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.00025327868852459016, |
|
"loss": 0.3858, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.0002527868852459016, |
|
"loss": 0.3354, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 0.0002522950819672131, |
|
"loss": 0.3314, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 0.00025180327868852456, |
|
"loss": 0.3332, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 0.00025131147540983606, |
|
"loss": 0.297, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 0.00025081967213114756, |
|
"loss": 0.2414, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 0.000250327868852459, |
|
"loss": 0.2282, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 0.00024983606557377045, |
|
"loss": 0.3007, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 0.00024934426229508195, |
|
"loss": 1.9302, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 0.0002488524590163934, |
|
"loss": 1.4471, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 0.0002483606557377049, |
|
"loss": 1.0509, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 0.0002478688524590164, |
|
"loss": 0.9157, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 0.00024737704918032785, |
|
"loss": 0.8162, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 0.0002468852459016393, |
|
"loss": 0.6961, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 0.0002463934426229508, |
|
"loss": 0.6048, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 0.0002459016393442623, |
|
"loss": 0.6278, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 0.00024540983606557374, |
|
"loss": 0.5126, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 0.0002449180327868852, |
|
"loss": 0.4451, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 0.0002444262295081967, |
|
"loss": 0.4496, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 0.00024393442622950816, |
|
"loss": 0.3534, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 0.00024344262295081966, |
|
"loss": 0.3675, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 0.00024295081967213114, |
|
"loss": 0.3927, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 0.0002424590163934426, |
|
"loss": 0.3711, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 0.00024196721311475406, |
|
"loss": 0.3521, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 0.00024147540983606556, |
|
"loss": 0.3142, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 0.00024098360655737703, |
|
"loss": 0.3412, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 0.0002404918032786885, |
|
"loss": 0.2855, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 0.3093, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 0.00023950819672131145, |
|
"loss": 0.3254, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 0.00023901639344262293, |
|
"loss": 0.3263, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 0.0002385245901639344, |
|
"loss": 0.2527, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 0.00023803278688524587, |
|
"loss": 0.2214, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 0.00023754098360655737, |
|
"loss": 0.305, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 0.00023704918032786882, |
|
"loss": 1.1692, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 0.0002365573770491803, |
|
"loss": 0.832, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 0.00023606557377049177, |
|
"loss": 0.6695, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 0.00023557377049180327, |
|
"loss": 0.6161, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 0.00023508196721311474, |
|
"loss": 0.5685, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 0.00023459016393442622, |
|
"loss": 0.5094, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.00023409836065573766, |
|
"loss": 0.4618, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.00023360655737704916, |
|
"loss": 0.4221, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 0.00023311475409836064, |
|
"loss": 0.3998, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 0.0002326229508196721, |
|
"loss": 0.369, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 0.00023213114754098358, |
|
"loss": 0.3503, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 0.00023163934426229506, |
|
"loss": 0.3426, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 0.00023114754098360653, |
|
"loss": 0.3081, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 0.000230655737704918, |
|
"loss": 0.321, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 0.00023016393442622948, |
|
"loss": 0.3081, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 0.00022967213114754098, |
|
"loss": 0.3378, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 0.00022918032786885245, |
|
"loss": 0.2946, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 0.0002286885245901639, |
|
"loss": 0.3051, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 0.00022819672131147537, |
|
"loss": 0.2399, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 0.00022770491803278688, |
|
"loss": 0.2973, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 0.00022721311475409835, |
|
"loss": 0.2952, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 0.00022672131147540982, |
|
"loss": 0.2593, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 0.00022622950819672127, |
|
"loss": 0.2495, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 0.00022573770491803277, |
|
"loss": 0.2275, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 0.00022524590163934424, |
|
"loss": 0.2896, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 0.00022475409836065572, |
|
"loss": 0.8577, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 0.0002242622950819672, |
|
"loss": 0.6569, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 0.0002237704918032787, |
|
"loss": 0.5387, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 0.00022327868852459014, |
|
"loss": 0.4774, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 0.0002227868852459016, |
|
"loss": 0.4175, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 0.00022229508196721309, |
|
"loss": 0.3658, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 0.00022180327868852459, |
|
"loss": 0.3594, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 0.00022131147540983606, |
|
"loss": 0.3114, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 0.0002208196721311475, |
|
"loss": 0.2732, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 0.00022032786885245898, |
|
"loss": 0.2827, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 0.00021983606557377048, |
|
"loss": 0.2918, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 0.00021934426229508195, |
|
"loss": 0.6729, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 0.00021885245901639343, |
|
"loss": 0.4497, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 0.0002183606557377049, |
|
"loss": 0.3901, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 0.00021786885245901638, |
|
"loss": 0.3872, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 0.00021737704918032785, |
|
"loss": 0.3052, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 0.00021688524590163932, |
|
"loss": 0.2726, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 0.0002163934426229508, |
|
"loss": 0.2653, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.0002159016393442623, |
|
"loss": 0.2387, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 0.00021540983606557374, |
|
"loss": 0.2052, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 0.00021491803278688522, |
|
"loss": 0.2342, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 0.0002144262295081967, |
|
"loss": 0.1993, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 0.0002139344262295082, |
|
"loss": 0.1983, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 0.00021344262295081967, |
|
"loss": 0.1869, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 0.00021295081967213114, |
|
"loss": 0.1725, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 0.00021245901639344259, |
|
"loss": 0.1686, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 0.0002119672131147541, |
|
"loss": 0.1684, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 0.00021147540983606556, |
|
"loss": 0.1432, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 0.00021098360655737703, |
|
"loss": 0.1873, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 0.0002104918032786885, |
|
"loss": 0.1583, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 0.00020999999999999998, |
|
"loss": 0.1472, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 0.00020950819672131146, |
|
"loss": 0.1488, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 0.00020901639344262293, |
|
"loss": 0.1143, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"learning_rate": 0.0002085245901639344, |
|
"loss": 0.1343, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 0.0002080327868852459, |
|
"loss": 0.1251, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 0.00020754098360655735, |
|
"loss": 0.1643, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 0.00020704918032786882, |
|
"loss": 0.7004, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 0.0002065573770491803, |
|
"loss": 0.6188, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 0.0002060655737704918, |
|
"loss": 0.423, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 0.00020557377049180327, |
|
"loss": 0.3769, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 0.00020508196721311475, |
|
"loss": 0.376, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 0.0002045901639344262, |
|
"loss": 0.2984, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 0.0002040983606557377, |
|
"loss": 0.2631, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 0.00020360655737704917, |
|
"loss": 0.254, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 0.00020311475409836064, |
|
"loss": 0.252, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"learning_rate": 0.00020262295081967211, |
|
"loss": 0.2213, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 0.00020213114754098356, |
|
"loss": 0.2044, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 0.00020163934426229506, |
|
"loss": 0.1789, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 6.34, |
|
"learning_rate": 0.00020114754098360653, |
|
"loss": 0.2126, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 0.000200655737704918, |
|
"loss": 0.1658, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 0.0002001639344262295, |
|
"loss": 0.1732, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 0.00019967213114754098, |
|
"loss": 0.1748, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 0.00019918032786885243, |
|
"loss": 0.1638, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 0.0001986885245901639, |
|
"loss": 0.157, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 0.0001981967213114754, |
|
"loss": 0.1418, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 0.00019770491803278688, |
|
"loss": 0.1552, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 6.41, |
|
"learning_rate": 0.00019721311475409835, |
|
"loss": 0.1415, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 0.0001967213114754098, |
|
"loss": 0.126, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 0.0001962295081967213, |
|
"loss": 0.1129, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"learning_rate": 0.00019573770491803277, |
|
"loss": 0.1441, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 0.00019524590163934425, |
|
"loss": 0.1373, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 0.00019475409836065572, |
|
"loss": 0.5316, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 0.00019426229508196722, |
|
"loss": 0.4373, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 0.00019377049180327867, |
|
"loss": 0.3649, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 0.00019327868852459014, |
|
"loss": 0.3233, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"learning_rate": 0.00019278688524590161, |
|
"loss": 0.2931, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 0.00019229508196721312, |
|
"loss": 0.2771, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 0.0001918032786885246, |
|
"loss": 0.2295, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"learning_rate": 0.00019131147540983604, |
|
"loss": 0.2316, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 0.0001908196721311475, |
|
"loss": 0.2157, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"learning_rate": 0.000190327868852459, |
|
"loss": 0.2125, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 0.00018983606557377048, |
|
"loss": 0.1578, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 0.00018934426229508196, |
|
"loss": 0.1633, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"learning_rate": 0.00018885245901639343, |
|
"loss": 0.1553, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"learning_rate": 0.00018836065573770488, |
|
"loss": 0.1544, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 0.00018786885245901638, |
|
"loss": 0.1958, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 0.00018737704918032785, |
|
"loss": 0.1519, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 0.00018688524590163933, |
|
"loss": 0.1557, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 0.00018639344262295083, |
|
"loss": 0.1656, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"learning_rate": 0.00018590163934426227, |
|
"loss": 0.1322, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 0.00018540983606557375, |
|
"loss": 0.1391, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 0.00018491803278688522, |
|
"loss": 0.151, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 0.00018442622950819672, |
|
"loss": 0.1497, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 0.0001839344262295082, |
|
"loss": 0.1293, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 0.00018344262295081964, |
|
"loss": 0.1088, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 0.00018295081967213112, |
|
"loss": 0.1561, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 0.00018245901639344262, |
|
"loss": 0.5004, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"learning_rate": 0.0001819672131147541, |
|
"loss": 0.4009, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 0.00018147540983606556, |
|
"loss": 0.3291, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 0.00018098360655737704, |
|
"loss": 0.2757, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 0.00018049180327868848, |
|
"loss": 0.2992, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 0.2224, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 0.00017950819672131146, |
|
"loss": 0.2268, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 0.00017901639344262293, |
|
"loss": 0.2091, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 0.00017852459016393443, |
|
"loss": 0.2108, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 0.00017803278688524588, |
|
"loss": 0.1898, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"learning_rate": 0.00017754098360655735, |
|
"loss": 0.1764, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 0.00017704918032786883, |
|
"loss": 0.1669, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 6.79, |
|
"learning_rate": 0.00017655737704918033, |
|
"loss": 0.1502, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 0.0001760655737704918, |
|
"loss": 0.1734, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 0.00017557377049180327, |
|
"loss": 0.1519, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 0.00017508196721311472, |
|
"loss": 0.1561, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 0.0001745901639344262, |
|
"loss": 0.1471, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 0.0001740983606557377, |
|
"loss": 0.1443, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"learning_rate": 0.00017360655737704917, |
|
"loss": 0.1161, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 0.00017311475409836064, |
|
"loss": 0.1166, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 6.86, |
|
"learning_rate": 0.0001726229508196721, |
|
"loss": 0.1304, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 0.0001721311475409836, |
|
"loss": 0.12, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 0.00017163934426229506, |
|
"loss": 0.1263, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 6.89, |
|
"learning_rate": 0.00017114754098360654, |
|
"loss": 0.1376, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 0.00017065573770491804, |
|
"loss": 0.1581, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 0.0001701639344262295, |
|
"loss": 0.398, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 0.00016967213114754096, |
|
"loss": 0.3025, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 0.00016918032786885243, |
|
"loss": 0.2683, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 0.00016868852459016393, |
|
"loss": 0.2023, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"learning_rate": 0.0001681967213114754, |
|
"loss": 0.184, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 0.00016770491803278688, |
|
"loss": 0.1605, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"learning_rate": 0.00016721311475409833, |
|
"loss": 0.142, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 0.0001667213114754098, |
|
"loss": 0.1362, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 0.0001662295081967213, |
|
"loss": 0.1242, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"learning_rate": 0.00016573770491803278, |
|
"loss": 0.1229, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 0.00016524590163934425, |
|
"loss": 0.1273, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"learning_rate": 0.00016475409836065575, |
|
"loss": 0.3292, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 0.0001642622950819672, |
|
"loss": 0.1917, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 0.00016377049180327867, |
|
"loss": 0.1808, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 0.00016327868852459014, |
|
"loss": 0.1459, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 0.00016278688524590164, |
|
"loss": 0.1363, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"learning_rate": 0.00016229508196721312, |
|
"loss": 0.1299, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 7.06, |
|
"learning_rate": 0.00016180327868852456, |
|
"loss": 0.1337, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 0.00016131147540983604, |
|
"loss": 0.1175, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 0.00016081967213114754, |
|
"loss": 0.1139, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 0.000160327868852459, |
|
"loss": 0.1132, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 0.00015983606557377049, |
|
"loss": 0.0938, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 0.00015934426229508193, |
|
"loss": 0.0853, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"learning_rate": 0.0001588524590163934, |
|
"loss": 0.0911, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 0.0001583606557377049, |
|
"loss": 0.0876, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 0.00015786885245901638, |
|
"loss": 0.0952, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 0.00015737704918032785, |
|
"loss": 0.0965, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 0.00015688524590163936, |
|
"loss": 0.0917, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 0.0001563934426229508, |
|
"loss": 0.0931, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 0.00015590163934426228, |
|
"loss": 0.0801, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 0.00015540983606557375, |
|
"loss": 0.081, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 7.19, |
|
"learning_rate": 0.00015491803278688525, |
|
"loss": 0.0888, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 0.00015442622950819672, |
|
"loss": 0.0644, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 0.00015393442622950817, |
|
"loss": 0.0697, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 0.00015344262295081964, |
|
"loss": 0.0626, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 0.00015295081967213112, |
|
"loss": 0.0777, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 0.00015245901639344262, |
|
"loss": 0.2966, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"learning_rate": 0.0001519672131147541, |
|
"loss": 0.2267, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 0.00015147540983606557, |
|
"loss": 0.1676, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 0.000150983606557377, |
|
"loss": 0.18, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 0.0001504918032786885, |
|
"loss": 0.1531, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 0.00015, |
|
"loss": 0.1334, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 0.00014950819672131146, |
|
"loss": 0.1211, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 0.00014901639344262293, |
|
"loss": 0.1199, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 0.0001485245901639344, |
|
"loss": 0.1123, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 0.0001480327868852459, |
|
"loss": 0.1059, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"learning_rate": 0.00014754098360655736, |
|
"loss": 0.0931, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 0.00014704918032786886, |
|
"loss": 0.0969, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 0.0001465573770491803, |
|
"loss": 0.0967, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"learning_rate": 0.00014606557377049178, |
|
"loss": 0.0934, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 0.00014557377049180328, |
|
"loss": 0.0883, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"learning_rate": 0.00014508196721311472, |
|
"loss": 0.0828, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 0.00014459016393442622, |
|
"loss": 0.0905, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 0.0001440983606557377, |
|
"loss": 0.0853, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 0.00014360655737704917, |
|
"loss": 0.0755, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 0.00014311475409836065, |
|
"loss": 0.0726, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 0.00014262295081967212, |
|
"loss": 0.0775, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"learning_rate": 0.0001421311475409836, |
|
"loss": 0.0798, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 0.00014163934426229507, |
|
"loss": 0.0591, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"learning_rate": 0.00014114754098360654, |
|
"loss": 0.0644, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 0.00014065573770491801, |
|
"loss": 0.0976, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 0.00014016393442622951, |
|
"loss": 0.2755, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 0.00013967213114754096, |
|
"loss": 0.2098, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.00013918032786885243, |
|
"loss": 0.1766, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.00013868852459016394, |
|
"loss": 0.1588, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"learning_rate": 0.00013819672131147538, |
|
"loss": 0.143, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 0.00013770491803278688, |
|
"loss": 0.1298, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 0.00013721311475409833, |
|
"loss": 0.1163, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"learning_rate": 0.00013672131147540983, |
|
"loss": 0.1181, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 0.0001362295081967213, |
|
"loss": 0.116, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"learning_rate": 0.00013573770491803278, |
|
"loss": 0.0969, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 0.00013524590163934425, |
|
"loss": 0.0956, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 0.00013475409836065573, |
|
"loss": 0.0921, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"learning_rate": 0.0001342622950819672, |
|
"loss": 0.1068, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"learning_rate": 0.00013377049180327867, |
|
"loss": 0.0846, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 0.00013327868852459017, |
|
"loss": 0.0797, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 7.59, |
|
"learning_rate": 0.00013278688524590162, |
|
"loss": 0.0789, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 0.0001322950819672131, |
|
"loss": 0.0802, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 7.61, |
|
"learning_rate": 0.00013180327868852457, |
|
"loss": 0.0798, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"learning_rate": 0.00013131147540983604, |
|
"loss": 0.0625, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 7.63, |
|
"learning_rate": 0.00013081967213114754, |
|
"loss": 0.0729, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 0.000130327868852459, |
|
"loss": 0.0702, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 0.0001298360655737705, |
|
"loss": 0.0749, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 0.00012934426229508196, |
|
"loss": 0.1538, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"learning_rate": 0.00012885245901639344, |
|
"loss": 0.0793, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 0.0001283606557377049, |
|
"loss": 0.1118, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"learning_rate": 0.00012786885245901638, |
|
"loss": 0.2584, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 7.69, |
|
"learning_rate": 0.00012737704918032786, |
|
"loss": 0.2042, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 0.00012688524590163933, |
|
"loss": 0.1538, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 7.71, |
|
"learning_rate": 0.0001263934426229508, |
|
"loss": 0.1337, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 0.00012590163934426228, |
|
"loss": 0.1333, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 0.00012540983606557378, |
|
"loss": 0.1308, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 7.74, |
|
"learning_rate": 0.00012491803278688523, |
|
"loss": 0.1062, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 7.74, |
|
"learning_rate": 0.0001244262295081967, |
|
"loss": 0.1091, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 0.0001239344262295082, |
|
"loss": 0.1118, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"learning_rate": 0.00012344262295081965, |
|
"loss": 0.1769, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 7.77, |
|
"learning_rate": 0.00012295081967213115, |
|
"loss": 0.0905, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 7.78, |
|
"learning_rate": 0.0001224590163934426, |
|
"loss": 0.1006, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"learning_rate": 0.00012196721311475408, |
|
"loss": 0.0995, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 0.00012147540983606557, |
|
"loss": 0.0921, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"learning_rate": 0.00012098360655737703, |
|
"loss": 0.0971, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"learning_rate": 0.00012049180327868852, |
|
"loss": 0.0774, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 0.00011999999999999999, |
|
"loss": 0.0866, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 0.00011950819672131146, |
|
"loss": 0.0981, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"learning_rate": 0.00011901639344262294, |
|
"loss": 0.072, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 0.00011852459016393441, |
|
"loss": 0.0674, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 0.00011803278688524588, |
|
"loss": 0.0639, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 7.87, |
|
"learning_rate": 0.00011754098360655737, |
|
"loss": 0.0839, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"learning_rate": 0.00011704918032786883, |
|
"loss": 0.066, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 7.89, |
|
"learning_rate": 0.00011655737704918032, |
|
"loss": 0.0597, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 0.00011606557377049179, |
|
"loss": 0.0819, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 0.00011557377049180327, |
|
"loss": 0.1785, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 0.00011508196721311474, |
|
"loss": 0.1619, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"learning_rate": 0.00011459016393442623, |
|
"loss": 0.1235, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"learning_rate": 0.00011409836065573769, |
|
"loss": 0.0996, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"learning_rate": 0.00011360655737704917, |
|
"loss": 0.0896, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 7.95, |
|
"learning_rate": 0.00011311475409836063, |
|
"loss": 0.0785, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"learning_rate": 0.00011262295081967212, |
|
"loss": 0.0756, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 7.97, |
|
"learning_rate": 0.0001121311475409836, |
|
"loss": 0.094, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 7.98, |
|
"learning_rate": 0.00011163934426229507, |
|
"loss": 0.0629, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 7.99, |
|
"learning_rate": 0.00011114754098360654, |
|
"loss": 0.0694, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 0.00011065573770491803, |
|
"loss": 0.0818, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 0.00011016393442622949, |
|
"loss": 0.1714, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 8.02, |
|
"learning_rate": 0.00010967213114754098, |
|
"loss": 0.1262, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"learning_rate": 0.00010918032786885245, |
|
"loss": 0.089, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 0.00010868852459016392, |
|
"loss": 0.1003, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 0.0001081967213114754, |
|
"loss": 0.0913, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"learning_rate": 0.00010770491803278687, |
|
"loss": 0.0799, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 8.06, |
|
"learning_rate": 0.00010721311475409835, |
|
"loss": 0.0765, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"learning_rate": 0.00010672131147540983, |
|
"loss": 0.0802, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"learning_rate": 0.00010622950819672129, |
|
"loss": 0.0641, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 0.00010573770491803278, |
|
"loss": 0.0614, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 0.00010524590163934425, |
|
"loss": 0.064, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 8.11, |
|
"learning_rate": 0.00010475409836065573, |
|
"loss": 0.0568, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"learning_rate": 0.0001042622950819672, |
|
"loss": 0.0609, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"learning_rate": 0.00010377049180327867, |
|
"loss": 0.0569, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"learning_rate": 0.00010327868852459015, |
|
"loss": 0.0584, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 0.00010278688524590164, |
|
"loss": 0.0536, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 8.15, |
|
"learning_rate": 0.0001022950819672131, |
|
"loss": 0.0542, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"learning_rate": 0.00010180327868852458, |
|
"loss": 0.0597, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"learning_rate": 0.00010131147540983606, |
|
"loss": 0.0608, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 0.00010081967213114753, |
|
"loss": 0.0479, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 8.19, |
|
"learning_rate": 0.000100327868852459, |
|
"loss": 0.0487, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 9.983606557377049e-05, |
|
"loss": 0.0546, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"learning_rate": 9.934426229508195e-05, |
|
"loss": 0.0393, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 9.885245901639344e-05, |
|
"loss": 0.0577, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 9.83606557377049e-05, |
|
"loss": 0.0428, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 8.23, |
|
"learning_rate": 9.786885245901639e-05, |
|
"loss": 0.1259, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"learning_rate": 9.737704918032786e-05, |
|
"loss": 0.1007, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 9.688524590163933e-05, |
|
"loss": 0.0901, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"learning_rate": 9.639344262295081e-05, |
|
"loss": 0.0894, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 9.59016393442623e-05, |
|
"loss": 0.0852, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 8.28, |
|
"learning_rate": 9.540983606557375e-05, |
|
"loss": 0.0744, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"learning_rate": 9.491803278688524e-05, |
|
"loss": 0.067, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 9.442622950819672e-05, |
|
"loss": 0.0717, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 9.393442622950819e-05, |
|
"loss": 0.0604, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 8.31, |
|
"learning_rate": 9.344262295081966e-05, |
|
"loss": 0.0617, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"learning_rate": 9.295081967213114e-05, |
|
"loss": 0.0527, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 9.245901639344261e-05, |
|
"loss": 0.057, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"learning_rate": 9.19672131147541e-05, |
|
"loss": 0.0499, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 9.147540983606556e-05, |
|
"loss": 0.0535, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 8.36, |
|
"learning_rate": 9.098360655737704e-05, |
|
"loss": 0.0546, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 8.37, |
|
"learning_rate": 9.049180327868852e-05, |
|
"loss": 0.0565, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 0.0507, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 8.39, |
|
"learning_rate": 8.950819672131147e-05, |
|
"loss": 0.0496, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 8.39, |
|
"learning_rate": 8.901639344262294e-05, |
|
"loss": 0.0445, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 8.852459016393441e-05, |
|
"loss": 0.0436, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 8.41, |
|
"learning_rate": 8.80327868852459e-05, |
|
"loss": 0.0449, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"learning_rate": 8.754098360655736e-05, |
|
"loss": 0.0465, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"learning_rate": 8.704918032786885e-05, |
|
"loss": 0.0365, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 8.44, |
|
"learning_rate": 8.655737704918032e-05, |
|
"loss": 0.0361, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 8.60655737704918e-05, |
|
"loss": 0.0566, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 8.46, |
|
"learning_rate": 8.557377049180327e-05, |
|
"loss": 0.1376, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"learning_rate": 8.508196721311476e-05, |
|
"loss": 0.1047, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 8.48, |
|
"learning_rate": 8.459016393442622e-05, |
|
"loss": 0.0878, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 8.48, |
|
"learning_rate": 8.40983606557377e-05, |
|
"loss": 0.0905, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"learning_rate": 8.360655737704916e-05, |
|
"loss": 0.0747, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 8.311475409836065e-05, |
|
"loss": 0.0621, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 8.51, |
|
"learning_rate": 8.262295081967212e-05, |
|
"loss": 0.075, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"learning_rate": 8.21311475409836e-05, |
|
"loss": 0.0683, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 8.53, |
|
"learning_rate": 8.163934426229507e-05, |
|
"loss": 0.0557, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 8.54, |
|
"learning_rate": 8.114754098360656e-05, |
|
"loss": 0.0588, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"learning_rate": 8.065573770491802e-05, |
|
"loss": 0.0597, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"learning_rate": 8.01639344262295e-05, |
|
"loss": 0.0598, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"learning_rate": 7.967213114754097e-05, |
|
"loss": 0.0542, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"learning_rate": 7.918032786885245e-05, |
|
"loss": 0.051, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 8.58, |
|
"learning_rate": 7.868852459016393e-05, |
|
"loss": 0.0587, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 8.59, |
|
"learning_rate": 7.81967213114754e-05, |
|
"loss": 0.0526, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 7.770491803278687e-05, |
|
"loss": 0.0481, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 8.61, |
|
"learning_rate": 7.721311475409836e-05, |
|
"loss": 0.0511, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 8.62, |
|
"learning_rate": 7.672131147540982e-05, |
|
"loss": 0.038, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"learning_rate": 7.622950819672131e-05, |
|
"loss": 0.0412, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"learning_rate": 7.573770491803278e-05, |
|
"loss": 0.052, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 8.65, |
|
"learning_rate": 7.524590163934426e-05, |
|
"loss": 0.0426, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 8.65, |
|
"learning_rate": 7.475409836065573e-05, |
|
"loss": 0.0514, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 8.66, |
|
"learning_rate": 7.42622950819672e-05, |
|
"loss": 0.0348, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 7.377049180327868e-05, |
|
"loss": 0.0697, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 8.68, |
|
"learning_rate": 7.327868852459015e-05, |
|
"loss": 0.1125, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 7.278688524590164e-05, |
|
"loss": 0.0963, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 7.229508196721311e-05, |
|
"loss": 0.0785, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"learning_rate": 7.180327868852459e-05, |
|
"loss": 0.0818, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 8.72, |
|
"learning_rate": 7.131147540983606e-05, |
|
"loss": 0.0798, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"learning_rate": 7.081967213114753e-05, |
|
"loss": 0.064, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 8.74, |
|
"learning_rate": 7.032786885245901e-05, |
|
"loss": 0.0586, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 8.74, |
|
"learning_rate": 6.983606557377048e-05, |
|
"loss": 0.0594, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 8.75, |
|
"learning_rate": 6.934426229508197e-05, |
|
"loss": 0.0661, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 8.76, |
|
"learning_rate": 6.885245901639344e-05, |
|
"loss": 0.0698, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"learning_rate": 6.836065573770492e-05, |
|
"loss": 0.0558, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 8.78, |
|
"learning_rate": 6.786885245901639e-05, |
|
"loss": 0.0524, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"learning_rate": 6.737704918032786e-05, |
|
"loss": 0.0562, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 6.688524590163934e-05, |
|
"loss": 0.0507, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 8.81, |
|
"learning_rate": 6.639344262295081e-05, |
|
"loss": 0.0477, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 6.590163934426228e-05, |
|
"loss": 0.0525, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 8.83, |
|
"learning_rate": 6.540983606557377e-05, |
|
"loss": 0.0456, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 8.83, |
|
"learning_rate": 6.491803278688524e-05, |
|
"loss": 0.0446, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"learning_rate": 6.442622950819672e-05, |
|
"loss": 0.0443, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 8.85, |
|
"learning_rate": 6.393442622950819e-05, |
|
"loss": 0.0344, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"learning_rate": 6.344262295081967e-05, |
|
"loss": 0.0557, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 8.87, |
|
"learning_rate": 6.295081967213114e-05, |
|
"loss": 0.0299, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"learning_rate": 6.245901639344261e-05, |
|
"loss": 0.0364, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 8.89, |
|
"learning_rate": 6.19672131147541e-05, |
|
"loss": 0.0459, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 6.147540983606557e-05, |
|
"loss": 0.0529, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 6.098360655737704e-05, |
|
"loss": 0.1122, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 6.0491803278688514e-05, |
|
"loss": 0.0814, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"learning_rate": 5.9999999999999995e-05, |
|
"loss": 0.0625, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 8.93, |
|
"learning_rate": 5.950819672131147e-05, |
|
"loss": 0.0625, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"learning_rate": 5.901639344262294e-05, |
|
"loss": 0.0568, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 8.95, |
|
"learning_rate": 5.8524590163934416e-05, |
|
"loss": 0.0485, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 8.96, |
|
"learning_rate": 5.8032786885245896e-05, |
|
"loss": 0.0434, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 5.754098360655737e-05, |
|
"loss": 0.0434, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 8.98, |
|
"learning_rate": 5.7049180327868844e-05, |
|
"loss": 0.0416, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 8.99, |
|
"learning_rate": 5.655737704918032e-05, |
|
"loss": 0.0464, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 5.60655737704918e-05, |
|
"loss": 0.0321, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"learning_rate": 5.557377049180327e-05, |
|
"loss": 0.1177, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"eval_loss": 0.35239124298095703, |
|
"eval_runtime": 326.5742, |
|
"eval_samples_per_second": 8.09, |
|
"eval_steps_per_second": 0.508, |
|
"eval_wer": 0.10420468068226894, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 9.02, |
|
"learning_rate": 5.5081967213114745e-05, |
|
"loss": 0.0658, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"learning_rate": 5.4590163934426226e-05, |
|
"loss": 0.0527, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 9.04, |
|
"learning_rate": 5.40983606557377e-05, |
|
"loss": 0.0639, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 9.04, |
|
"learning_rate": 5.360655737704917e-05, |
|
"loss": 0.0622, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 5.3114754098360647e-05, |
|
"loss": 0.0566, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 9.06, |
|
"learning_rate": 5.262295081967213e-05, |
|
"loss": 0.0423, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 5.21311475409836e-05, |
|
"loss": 0.0496, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 9.08, |
|
"learning_rate": 5.1639344262295074e-05, |
|
"loss": 0.0438, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"learning_rate": 5.114754098360655e-05, |
|
"loss": 0.04, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 5.065573770491803e-05, |
|
"loss": 0.0399, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 9.11, |
|
"learning_rate": 5.01639344262295e-05, |
|
"loss": 0.0494, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"learning_rate": 4.9672131147540976e-05, |
|
"loss": 0.0367, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"learning_rate": 4.918032786885245e-05, |
|
"loss": 0.0387, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"learning_rate": 4.868852459016393e-05, |
|
"loss": 0.0372, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 9.14, |
|
"learning_rate": 4.8196721311475404e-05, |
|
"loss": 0.0334, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 9.15, |
|
"learning_rate": 4.770491803278688e-05, |
|
"loss": 0.0286, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 9.16, |
|
"learning_rate": 4.721311475409836e-05, |
|
"loss": 0.0352, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 9.17, |
|
"learning_rate": 4.672131147540983e-05, |
|
"loss": 0.0299, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 4.6229508196721305e-05, |
|
"loss": 0.0288, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 9.19, |
|
"learning_rate": 4.573770491803278e-05, |
|
"loss": 0.0322, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 4.524590163934426e-05, |
|
"loss": 0.0304, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 9.21, |
|
"learning_rate": 4.475409836065573e-05, |
|
"loss": 0.0386, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 4.4262295081967207e-05, |
|
"loss": 0.0332, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 4.377049180327868e-05, |
|
"loss": 0.0501, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 9.23, |
|
"learning_rate": 4.327868852459016e-05, |
|
"loss": 0.0687, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 9.24, |
|
"learning_rate": 4.2786885245901634e-05, |
|
"loss": 0.071, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 4.229508196721311e-05, |
|
"loss": 0.0587, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"learning_rate": 4.180327868852458e-05, |
|
"loss": 0.0558, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"learning_rate": 4.131147540983606e-05, |
|
"loss": 0.0559, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"learning_rate": 4.0819672131147536e-05, |
|
"loss": 0.0525, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 9.29, |
|
"learning_rate": 4.032786885245901e-05, |
|
"loss": 0.0428, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 3.983606557377048e-05, |
|
"loss": 0.05, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 3.9344262295081964e-05, |
|
"loss": 0.043, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 9.31, |
|
"learning_rate": 3.885245901639344e-05, |
|
"loss": 0.0455, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 9.32, |
|
"learning_rate": 3.836065573770491e-05, |
|
"loss": 0.0381, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 3.786885245901639e-05, |
|
"loss": 0.0315, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 9.34, |
|
"learning_rate": 3.7377049180327865e-05, |
|
"loss": 0.0366, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 3.688524590163934e-05, |
|
"loss": 0.0286, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"learning_rate": 3.639344262295082e-05, |
|
"loss": 0.0401, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 9.37, |
|
"learning_rate": 3.590163934426229e-05, |
|
"loss": 0.0402, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 9.38, |
|
"learning_rate": 3.540983606557377e-05, |
|
"loss": 0.0349, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"learning_rate": 3.491803278688524e-05, |
|
"loss": 0.035, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"learning_rate": 3.442622950819672e-05, |
|
"loss": 0.0276, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 3.3934426229508194e-05, |
|
"loss": 0.0299, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 9.41, |
|
"learning_rate": 3.344262295081967e-05, |
|
"loss": 0.0323, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 9.42, |
|
"learning_rate": 3.295081967213114e-05, |
|
"loss": 0.027, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"learning_rate": 3.245901639344262e-05, |
|
"loss": 0.0245, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"learning_rate": 3.1967213114754096e-05, |
|
"loss": 0.0424, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 3.147540983606557e-05, |
|
"loss": 0.0517, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 9.46, |
|
"learning_rate": 3.098360655737705e-05, |
|
"loss": 0.081, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"learning_rate": 3.049180327868852e-05, |
|
"loss": 0.0621, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 9.48, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 0.054, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 9.48, |
|
"learning_rate": 2.950819672131147e-05, |
|
"loss": 0.0524, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 9.49, |
|
"learning_rate": 2.9016393442622948e-05, |
|
"loss": 0.0471, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 2.8524590163934422e-05, |
|
"loss": 0.048, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"learning_rate": 2.80327868852459e-05, |
|
"loss": 0.0498, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 9.52, |
|
"learning_rate": 2.7540983606557373e-05, |
|
"loss": 0.0381, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"learning_rate": 2.704918032786885e-05, |
|
"loss": 0.0503, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"learning_rate": 2.6557377049180323e-05, |
|
"loss": 0.0317, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 2.60655737704918e-05, |
|
"loss": 0.0365, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 9.56, |
|
"learning_rate": 2.5573770491803274e-05, |
|
"loss": 0.0385, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 2.508196721311475e-05, |
|
"loss": 0.0298, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 2.4590163934426225e-05, |
|
"loss": 0.0325, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 9.58, |
|
"learning_rate": 2.4098360655737702e-05, |
|
"loss": 0.0314, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 9.59, |
|
"learning_rate": 2.360655737704918e-05, |
|
"loss": 0.0316, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 2.3114754098360653e-05, |
|
"loss": 0.0467, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 9.61, |
|
"learning_rate": 2.262295081967213e-05, |
|
"loss": 0.0386, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 9.62, |
|
"learning_rate": 2.2131147540983603e-05, |
|
"loss": 0.0289, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"learning_rate": 2.163934426229508e-05, |
|
"loss": 0.0298, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"learning_rate": 2.1147540983606554e-05, |
|
"loss": 0.0383, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 2.065573770491803e-05, |
|
"loss": 0.0362, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 2.0163934426229505e-05, |
|
"loss": 0.0308, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 9.66, |
|
"learning_rate": 1.9672131147540982e-05, |
|
"loss": 0.0252, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 1.9180327868852456e-05, |
|
"loss": 0.0467, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"learning_rate": 1.8688524590163933e-05, |
|
"loss": 0.07, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 9.69, |
|
"learning_rate": 1.819672131147541e-05, |
|
"loss": 0.0574, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 1.7704918032786883e-05, |
|
"loss": 0.0483, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 9.71, |
|
"learning_rate": 1.721311475409836e-05, |
|
"loss": 0.0526, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"learning_rate": 1.6721311475409834e-05, |
|
"loss": 0.0521, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"learning_rate": 1.622950819672131e-05, |
|
"loss": 0.0498, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 9.74, |
|
"learning_rate": 1.5737704918032785e-05, |
|
"loss": 0.0438, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 9.74, |
|
"learning_rate": 1.524590163934426e-05, |
|
"loss": 0.0459, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 1.4754098360655736e-05, |
|
"loss": 0.0403, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 9.76, |
|
"learning_rate": 1.4262295081967211e-05, |
|
"loss": 0.0334, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"learning_rate": 1.3770491803278686e-05, |
|
"loss": 0.0457, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 9.78, |
|
"learning_rate": 1.3278688524590162e-05, |
|
"loss": 0.0393, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 9.79, |
|
"learning_rate": 1.2786885245901637e-05, |
|
"loss": 0.0364, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 1.2295081967213112e-05, |
|
"loss": 0.0329, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"learning_rate": 1.180327868852459e-05, |
|
"loss": 0.042, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 1.1311475409836065e-05, |
|
"loss": 0.0375, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 9.83, |
|
"learning_rate": 1.081967213114754e-05, |
|
"loss": 0.0371, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 9.83, |
|
"learning_rate": 1.0327868852459016e-05, |
|
"loss": 0.0285, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 9.84, |
|
"learning_rate": 9.836065573770491e-06, |
|
"loss": 0.0278, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"learning_rate": 9.344262295081966e-06, |
|
"loss": 0.0275, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 9.86, |
|
"learning_rate": 8.852459016393442e-06, |
|
"loss": 0.0324, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 9.87, |
|
"learning_rate": 8.360655737704917e-06, |
|
"loss": 0.0287, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 7.868852459016392e-06, |
|
"loss": 0.0307, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"learning_rate": 7.377049180327868e-06, |
|
"loss": 0.0327, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 6.885245901639343e-06, |
|
"loss": 0.0448, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 6.3934426229508185e-06, |
|
"loss": 0.0575, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 5.901639344262295e-06, |
|
"loss": 0.0564, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 9.92, |
|
"learning_rate": 5.40983606557377e-06, |
|
"loss": 0.0421, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"learning_rate": 4.9180327868852455e-06, |
|
"loss": 0.0421, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 9.94, |
|
"learning_rate": 4.426229508196721e-06, |
|
"loss": 0.0357, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"learning_rate": 3.934426229508196e-06, |
|
"loss": 0.0404, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 9.96, |
|
"learning_rate": 3.4426229508196716e-06, |
|
"loss": 0.031, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"learning_rate": 2.9508196721311474e-06, |
|
"loss": 0.0362, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"learning_rate": 2.4590163934426227e-06, |
|
"loss": 0.0295, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 9.99, |
|
"learning_rate": 1.967213114754098e-06, |
|
"loss": 0.0268, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 1.4754098360655737e-06, |
|
"loss": 0.0308, |
|
"step": 1110 |
|
}, |
|
    {
      "epoch": 10.0,
      "step": 1110,
      "total_flos": 0.0,
      "train_loss": 2.1241667401347613,
      "train_runtime": 25891.0003,
      "train_samples_per_second": 11.022,
      "train_steps_per_second": 0.043
    }
  ],
  "max_steps": 1110,
  "num_train_epochs": 10,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}