{
  "best_metric": 1.987623929977417,
  "best_model_checkpoint": "output/checkpoint-1200",
  "epoch": 0.8204207861297611,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 2.9999999999999997e-06, "loss": 1.8876, "step": 1 },
    { "epoch": 0.0, "learning_rate": 5.999999999999999e-06, "loss": 2.0725, "step": 2 },
    { "epoch": 0.0, "learning_rate": 8.999999999999999e-06, "loss": 2.0423, "step": 3 },
    { "epoch": 0.0, "learning_rate": 1.1999999999999999e-05, "loss": 2.0022, "step": 4 },
    { "epoch": 0.0, "learning_rate": 1.4999999999999999e-05, "loss": 2.0503, "step": 5 },
    { "epoch": 0.0, "learning_rate": 1.7999999999999997e-05, "loss": 2.0501, "step": 6 },
    { "epoch": 0.0, "learning_rate": 2.1e-05, "loss": 2.0322, "step": 7 },
    { "epoch": 0.01, "learning_rate": 2.3999999999999997e-05, "loss": 2.0892, "step": 8 },
    { "epoch": 0.01, "learning_rate": 2.6999999999999996e-05, "loss": 2.0786, "step": 9 },
    { "epoch": 0.01, "learning_rate": 2.9999999999999997e-05, "loss": 2.0111, "step": 10 },
    { "epoch": 0.01, "learning_rate": 3.2999999999999996e-05, "loss": 1.9539, "step": 11 },
    { "epoch": 0.01, "learning_rate": 3.5999999999999994e-05, "loss": 2.0585, "step": 12 },
    { "epoch": 0.01, "learning_rate": 3.9e-05, "loss": 2.0, "step": 13 },
    { "epoch": 0.01, "learning_rate": 4.2e-05, "loss": 1.9882, "step": 14 },
    { "epoch": 0.01, "learning_rate": 4.4999999999999996e-05, "loss": 1.8964, "step": 15 },
    { "epoch": 0.01, "learning_rate": 4.7999999999999994e-05, "loss": 2.0331, "step": 16 },
    { "epoch": 0.01, "learning_rate": 5.1e-05, "loss": 2.0569, "step": 17 },
    { "epoch": 0.01, "learning_rate": 5.399999999999999e-05, "loss": 1.9941, "step": 18 },
    { "epoch": 0.01, "learning_rate": 5.6999999999999996e-05, "loss": 1.9494, "step": 19 },
    { "epoch": 0.01, "learning_rate": 5.9999999999999995e-05, "loss": 2.0266, "step": 20 },
    { "epoch": 0.01, "learning_rate": 6.299999999999999e-05, "loss": 1.9042, "step": 21 },
    { "epoch": 0.02, "learning_rate": 6.599999999999999e-05, "loss": 2.009, "step": 22 },
    { "epoch": 0.02, "learning_rate": 6.9e-05, "loss": 1.9669, "step": 23 },
    { "epoch": 0.02, "learning_rate": 7.199999999999999e-05, "loss": 2.0222, "step": 24 },
    { "epoch": 0.02, "learning_rate": 7.5e-05, "loss": 1.8443, "step": 25 },
    { "epoch": 0.02, "learning_rate": 7.8e-05, "loss": 1.9351, "step": 26 },
    { "epoch": 0.02, "learning_rate": 8.1e-05, "loss": 1.9858, "step": 27 },
    { "epoch": 0.02, "learning_rate": 8.4e-05, "loss": 2.0311, "step": 28 },
    { "epoch": 0.02, "learning_rate": 8.699999999999999e-05, "loss": 1.9912, "step": 29 },
    { "epoch": 0.02, "learning_rate": 8.999999999999999e-05, "loss": 2.0, "step": 30 },
    { "epoch": 0.02, "learning_rate": 9.3e-05, "loss": 2.0135, "step": 31 },
    { "epoch": 0.02, "learning_rate": 9.599999999999999e-05, "loss": 1.9629, "step": 32 },
    { "epoch": 0.02, "learning_rate": 9.9e-05, "loss": 1.9932, "step": 33 },
    { "epoch": 0.02, "learning_rate": 0.000102, "loss": 2.0303, "step": 34 },
    { "epoch": 0.02, "learning_rate": 0.00010499999999999999, "loss": 1.9356, "step": 35 },
    { "epoch": 0.02, "learning_rate": 0.00010799999999999998, "loss": 1.9696, "step": 36 },
    { "epoch": 0.03, "learning_rate": 0.00011099999999999999, "loss": 1.9832, "step": 37 },
    { "epoch": 0.03, "learning_rate": 0.00011399999999999999, "loss": 1.9901, "step": 38 },
    { "epoch": 0.03, "learning_rate": 0.000117, "loss": 2.017, "step": 39 },
    { "epoch": 0.03, "learning_rate": 0.00011999999999999999, "loss": 2.0139, "step": 40 },
    { "epoch": 0.03, "learning_rate": 0.00012299999999999998, "loss": 1.9377, "step": 41 },
    { "epoch": 0.03, "learning_rate": 0.00012599999999999997, "loss": 1.8405, "step": 42 },
    { "epoch": 0.03, "learning_rate": 0.000129, "loss": 1.9986, "step": 43 },
    { "epoch": 0.03, "learning_rate": 0.00013199999999999998, "loss": 2.0137, "step": 44 },
    { "epoch": 0.03, "learning_rate": 0.000135, "loss": 2.0437, "step": 45 },
    { "epoch": 0.03, "learning_rate": 0.000138, "loss": 2.0392, "step": 46 },
    { "epoch": 0.03, "learning_rate": 0.00014099999999999998, "loss": 1.964, "step": 47 },
    { "epoch": 0.03, "learning_rate": 0.00014399999999999998, "loss": 1.9871, "step": 48 },
    { "epoch": 0.03, "learning_rate": 0.000147, "loss": 1.9833, "step": 49 },
    { "epoch": 0.03, "learning_rate": 0.00015, "loss": 2.0132, "step": 50 },
    { "epoch": 0.03, "learning_rate": 0.00015299999999999998, "loss": 2.0262, "step": 51 },
    { "epoch": 0.04, "learning_rate": 0.000156, "loss": 1.9727, "step": 52 },
    { "epoch": 0.04, "learning_rate": 0.000159, "loss": 1.9904, "step": 53 },
    { "epoch": 0.04, "learning_rate": 0.000162, "loss": 1.955, "step": 54 },
    { "epoch": 0.04, "learning_rate": 0.000165, "loss": 1.9937, "step": 55 },
    { "epoch": 0.04, "learning_rate": 0.000168, "loss": 2.0414, "step": 56 },
    { "epoch": 0.04, "learning_rate": 0.00017099999999999998, "loss": 1.9754, "step": 57 },
    { "epoch": 0.04, "learning_rate": 0.00017399999999999997, "loss": 1.9559, "step": 58 },
    { "epoch": 0.04, "learning_rate": 0.00017699999999999997, "loss": 1.971, "step": 59 },
    { "epoch": 0.04, "learning_rate": 0.00017999999999999998, "loss": 1.959, "step": 60 },
    { "epoch": 0.04, "learning_rate": 0.00018299999999999998, "loss": 1.973, "step": 61 },
    { "epoch": 0.04, "learning_rate": 0.000186, "loss": 1.9736, "step": 62 },
    { "epoch": 0.04, "learning_rate": 0.00018899999999999999, "loss": 2.01, "step": 63 },
    { "epoch": 0.04, "learning_rate": 0.00019199999999999998, "loss": 1.9916, "step": 64 },
    { "epoch": 0.04, "learning_rate": 0.000195, "loss": 1.8412, "step": 65 },
    { "epoch": 0.05, "learning_rate": 0.000198, "loss": 1.9188, "step": 66 },
    { "epoch": 0.05, "learning_rate": 0.000201, "loss": 1.9836, "step": 67 },
    { "epoch": 0.05, "learning_rate": 0.000204, "loss": 1.9659, "step": 68 },
    { "epoch": 0.05, "learning_rate": 0.00020699999999999996, "loss": 1.8651, "step": 69 },
    { "epoch": 0.05, "learning_rate": 0.00020999999999999998, "loss": 1.8155, "step": 70 },
    { "epoch": 0.05, "learning_rate": 0.00021299999999999997, "loss": 1.9043, "step": 71 },
    { "epoch": 0.05, "learning_rate": 0.00021599999999999996, "loss": 1.9087, "step": 72 },
    { "epoch": 0.05, "learning_rate": 0.00021899999999999998, "loss": 2.0042, "step": 73 },
    { "epoch": 0.05, "learning_rate": 0.00022199999999999998, "loss": 1.9537, "step": 74 },
    { "epoch": 0.05, "learning_rate": 0.000225, "loss": 1.9211, "step": 75 },
    { "epoch": 0.05, "learning_rate": 0.00022799999999999999, "loss": 1.9345, "step": 76 },
    { "epoch": 0.05, "learning_rate": 0.00023099999999999998, "loss": 1.84, "step": 77 },
    { "epoch": 0.05, "learning_rate": 0.000234, "loss": 1.9432, "step": 78 },
    { "epoch": 0.05, "learning_rate": 0.000237, "loss": 1.9381, "step": 79 },
    { "epoch": 0.05, "learning_rate": 0.00023999999999999998, "loss": 1.9052, "step": 80 },
    { "epoch": 0.06, "learning_rate": 0.000243, "loss": 1.9634, "step": 81 },
    { "epoch": 0.06, "learning_rate": 0.00024599999999999996, "loss": 1.9323, "step": 82 },
    { "epoch": 0.06, "learning_rate": 0.000249, "loss": 2.0616, "step": 83 },
    { "epoch": 0.06, "learning_rate": 0.00025199999999999995, "loss": 1.971, "step": 84 },
    { "epoch": 0.06, "learning_rate": 0.00025499999999999996, "loss": 1.9197, "step": 85 },
    { "epoch": 0.06, "learning_rate": 0.000258, "loss": 1.9734, "step": 86 },
    { "epoch": 0.06, "learning_rate": 0.000261, "loss": 1.8279, "step": 87 },
    { "epoch": 0.06, "learning_rate": 0.00026399999999999997, "loss": 1.8787, "step": 88 },
    { "epoch": 0.06, "learning_rate": 0.000267, "loss": 1.9271, "step": 89 },
    { "epoch": 0.06, "learning_rate": 0.00027, "loss": 1.8918, "step": 90 },
    { "epoch": 0.06, "learning_rate": 0.00027299999999999997, "loss": 1.9327, "step": 91 },
    { "epoch": 0.06, "learning_rate": 0.000276, "loss": 1.8631, "step": 92 },
    { "epoch": 0.06, "learning_rate": 0.000279, "loss": 1.8385, "step": 93 },
    { "epoch": 0.06, "learning_rate": 0.00028199999999999997, "loss": 1.9437, "step": 94 },
    { "epoch": 0.06, "learning_rate": 0.000285, "loss": 1.8879, "step": 95 },
    { "epoch": 0.07, "learning_rate": 0.00028799999999999995, "loss": 1.9186, "step": 96 },
    { "epoch": 0.07, "learning_rate": 0.00029099999999999997, "loss": 1.9182, "step": 97 },
    { "epoch": 0.07, "learning_rate": 0.000294, "loss": 1.8664, "step": 98 },
    { "epoch": 0.07, "learning_rate": 0.00029699999999999996, "loss": 1.9239, "step": 99 },
    { "epoch": 0.07, "learning_rate": 0.0003, "loss": 1.9942, "step": 100 },
    { "epoch": 0.07, "learning_rate": 0.000303, "loss": 1.8882, "step": 101 },
    { "epoch": 0.07, "learning_rate": 0.00030599999999999996, "loss": 1.9568, "step": 102 },
    { "epoch": 0.07, "learning_rate": 0.000309, "loss": 1.9213, "step": 103 },
    { "epoch": 0.07, "learning_rate": 0.000312, "loss": 1.8019, "step": 104 },
    { "epoch": 0.07, "learning_rate": 0.00031499999999999996, "loss": 1.8682, "step": 105 },
    { "epoch": 0.07, "learning_rate": 0.000318, "loss": 1.8517, "step": 106 },
    { "epoch": 0.07, "learning_rate": 0.000321, "loss": 1.8922, "step": 107 },
    { "epoch": 0.07, "learning_rate": 0.000324, "loss": 1.9581, "step": 108 },
    { "epoch": 0.07, "learning_rate": 0.000327, "loss": 1.9073, "step": 109 },
    { "epoch": 0.08, "learning_rate": 0.00033, "loss": 1.8199, "step": 110 },
    { "epoch": 0.08, "learning_rate": 0.000333, "loss": 1.933, "step": 111 },
    { "epoch": 0.08, "learning_rate": 0.000336, "loss": 1.8907, "step": 112 },
    { "epoch": 0.08, "learning_rate": 0.00033899999999999995, "loss": 1.9093, "step": 113 },
    { "epoch": 0.08, "learning_rate": 0.00034199999999999996, "loss": 1.8501, "step": 114 },
    { "epoch": 0.08, "learning_rate": 0.00034499999999999993, "loss": 1.968, "step": 115 },
    { "epoch": 0.08, "learning_rate": 0.00034799999999999995, "loss": 1.8998, "step": 116 },
    { "epoch": 0.08, "learning_rate": 0.00035099999999999997, "loss": 1.9588, "step": 117 },
    { "epoch": 0.08, "learning_rate": 0.00035399999999999993, "loss": 1.8592, "step": 118 },
    { "epoch": 0.08, "learning_rate": 0.00035699999999999995, "loss": 1.9135, "step": 119 },
    { "epoch": 0.08, "learning_rate": 0.00035999999999999997, "loss": 1.8535, "step": 120 },
    { "epoch": 0.08, "learning_rate": 0.00036299999999999993, "loss": 1.8781, "step": 121 },
    { "epoch": 0.08, "learning_rate": 0.00036599999999999995, "loss": 1.8987, "step": 122 },
    { "epoch": 0.08, "learning_rate": 0.00036899999999999997, "loss": 1.8756, "step": 123 },
    { "epoch": 0.08, "learning_rate": 0.000372, "loss": 1.8677, "step": 124 },
    { "epoch": 0.09, "learning_rate": 0.00037499999999999995, "loss": 1.8222, "step": 125 },
    { "epoch": 0.09, "learning_rate": 0.00037799999999999997, "loss": 1.9473, "step": 126 },
    { "epoch": 0.09, "learning_rate": 0.000381, "loss": 1.8606, "step": 127 },
    { "epoch": 0.09, "learning_rate": 0.00038399999999999996, "loss": 1.9605, "step": 128 },
    { "epoch": 0.09, "learning_rate": 0.000387, "loss": 1.9135, "step": 129 },
    { "epoch": 0.09, "learning_rate": 0.00039, "loss": 1.9203, "step": 130 },
    { "epoch": 0.09, "learning_rate": 0.00039299999999999996, "loss": 1.9645, "step": 131 },
    { "epoch": 0.09, "learning_rate": 0.000396, "loss": 1.8207, "step": 132 },
    { "epoch": 0.09, "learning_rate": 0.000399, "loss": 1.9054, "step": 133 },
    { "epoch": 0.09, "learning_rate": 0.000402, "loss": 1.8238, "step": 134 },
    { "epoch": 0.09, "learning_rate": 0.000405, "loss": 1.9795, "step": 135 },
    { "epoch": 0.09, "learning_rate": 0.000408, "loss": 1.923, "step": 136 },
    { "epoch": 0.09, "learning_rate": 0.000411, "loss": 1.8111, "step": 137 },
    { "epoch": 0.09, "learning_rate": 0.0004139999999999999, "loss": 1.8022, "step": 138 },
    { "epoch": 0.1, "learning_rate": 0.00041699999999999994, "loss": 1.8601, "step": 139 },
    { "epoch": 0.1, "learning_rate": 0.00041999999999999996, "loss": 1.9157, "step": 140 },
    { "epoch": 0.1, "learning_rate": 0.00042299999999999993, "loss": 1.8795, "step": 141 },
    { "epoch": 0.1, "learning_rate": 0.00042599999999999995, "loss": 1.8175, "step": 142 },
    { "epoch": 0.1, "learning_rate": 0.00042899999999999997, "loss": 1.9633, "step": 143 },
    { "epoch": 0.1, "learning_rate": 0.00043199999999999993, "loss": 1.7384, "step": 144 },
    { "epoch": 0.1, "learning_rate": 0.00043499999999999995, "loss": 1.862, "step": 145 },
    { "epoch": 0.1, "learning_rate": 0.00043799999999999997, "loss": 1.8612, "step": 146 },
    { "epoch": 0.1, "learning_rate": 0.00044099999999999993, "loss": 1.8592, "step": 147 },
    { "epoch": 0.1, "learning_rate": 0.00044399999999999995, "loss": 1.8175, "step": 148 },
    { "epoch": 0.1, "learning_rate": 0.00044699999999999997, "loss": 1.8907, "step": 149 },
    { "epoch": 0.1, "learning_rate": 0.00045, "loss": 1.9142, "step": 150 },
    { "epoch": 0.1, "learning_rate": 0.00045299999999999995, "loss": 1.8859, "step": 151 },
    { "epoch": 0.1, "learning_rate": 0.00045599999999999997, "loss": 1.8369, "step": 152 },
    { "epoch": 0.1, "learning_rate": 0.000459, "loss": 1.914, "step": 153 },
    { "epoch": 0.11, "learning_rate": 0.00046199999999999995, "loss": 1.8381, "step": 154 },
    { "epoch": 0.11, "learning_rate": 0.00046499999999999997, "loss": 1.96, "step": 155 },
    { "epoch": 0.11, "learning_rate": 0.000468, "loss": 1.7527, "step": 156 },
    { "epoch": 0.11, "learning_rate": 0.00047099999999999996, "loss": 1.8762, "step": 157 },
    { "epoch": 0.11, "learning_rate": 0.000474, "loss": 1.9295, "step": 158 },
    { "epoch": 0.11, "learning_rate": 0.000477, "loss": 1.8433, "step": 159 },
    { "epoch": 0.11, "learning_rate": 0.00047999999999999996, "loss": 1.7407, "step": 160 },
    { "epoch": 0.11, "learning_rate": 0.000483, "loss": 1.9237, "step": 161 },
    { "epoch": 0.11, "learning_rate": 0.000486, "loss": 1.8505, "step": 162 },
    { "epoch": 0.11, "learning_rate": 0.000489, "loss": 1.8386, "step": 163 },
    { "epoch": 0.11, "learning_rate": 0.0004919999999999999, "loss": 1.8489, "step": 164 },
    { "epoch": 0.11, "learning_rate": 0.0004949999999999999, "loss": 1.8447, "step": 165 },
    { "epoch": 0.11, "learning_rate": 0.000498, "loss": 1.8709, "step": 166 },
    { "epoch": 0.11, "learning_rate": 0.0005009999999999999, "loss": 1.7427, "step": 167 },
    { "epoch": 0.11, "learning_rate": 0.0005039999999999999, "loss": 1.7115, "step": 168 },
    { "epoch": 0.12, "learning_rate": 0.000507, "loss": 1.8692, "step": 169 },
    { "epoch": 0.12, "learning_rate": 0.0005099999999999999, "loss": 1.8863, "step": 170 },
    { "epoch": 0.12, "learning_rate": 0.0005129999999999999, "loss": 1.7856, "step": 171 },
    { "epoch": 0.12, "learning_rate": 0.000516, "loss": 1.8338, "step": 172 },
    { "epoch": 0.12, "learning_rate": 0.0005189999999999999, "loss": 1.9064, "step": 173 },
    { "epoch": 0.12, "learning_rate": 0.000522, "loss": 1.7566, "step": 174 },
    { "epoch": 0.12, "learning_rate": 0.000525, "loss": 1.7792, "step": 175 },
    { "epoch": 0.12, "learning_rate": 0.0005279999999999999, "loss": 1.7444, "step": 176 },
    { "epoch": 0.12, "learning_rate": 0.000531, "loss": 1.8172, "step": 177 },
    { "epoch": 0.12, "learning_rate": 0.000534, "loss": 1.8232, "step": 178 },
    { "epoch": 0.12, "learning_rate": 0.0005369999999999999, "loss": 1.808, "step": 179 },
    { "epoch": 0.12, "learning_rate": 0.00054, "loss": 1.7316, "step": 180 },
    { "epoch": 0.12, "learning_rate": 0.000543, "loss": 1.7889, "step": 181 },
    { "epoch": 0.12, "learning_rate": 0.0005459999999999999, "loss": 1.8263, "step": 182 },
    { "epoch": 0.13, "learning_rate": 0.000549, "loss": 1.8702, "step": 183 },
    { "epoch": 0.13, "learning_rate": 0.000552, "loss": 1.8295, "step": 184 },
    { "epoch": 0.13, "learning_rate": 0.0005549999999999999, "loss": 1.7151, "step": 185 },
    { "epoch": 0.13, "learning_rate": 0.000558, "loss": 1.7877, "step": 186 },
    { "epoch": 0.13, "learning_rate": 0.000561, "loss": 1.7469, "step": 187 },
    { "epoch": 0.13, "learning_rate": 0.0005639999999999999, "loss": 1.8627, "step": 188 },
    { "epoch": 0.13, "learning_rate": 0.0005669999999999999, "loss": 1.6922, "step": 189 },
    { "epoch": 0.13, "learning_rate": 0.00057, "loss": 1.8159, "step": 190 },
    { "epoch": 0.13, "learning_rate": 0.0005729999999999999, "loss": 1.7839, "step": 191 },
    { "epoch": 0.13, "learning_rate": 0.0005759999999999999, "loss": 1.8165, "step": 192 },
    { "epoch": 0.13, "learning_rate": 0.000579, "loss": 1.8228, "step": 193 },
    { "epoch": 0.13, "learning_rate": 0.0005819999999999999, "loss": 1.8615, "step": 194 },
    { "epoch": 0.13, "learning_rate": 0.0005849999999999999, "loss": 1.8149, "step": 195 },
    { "epoch": 0.13, "learning_rate": 0.000588, "loss": 1.8531, "step": 196 },
    { "epoch": 0.13, "learning_rate": 0.0005909999999999999, "loss": 1.7961, "step": 197 },
    { "epoch": 0.14, "learning_rate": 0.0005939999999999999, "loss": 1.7878, "step": 198 },
    { "epoch": 0.14, "learning_rate": 0.000597, "loss": 1.7078, "step": 199 },
    { "epoch": 0.14, "learning_rate": 0.0006, "loss": 1.77, "step": 200 },
    { "epoch": 0.14, "learning_rate": 0.0005998937677053823, "loss": 1.7704, "step": 201 },
    { "epoch": 0.14, "learning_rate": 0.0005997875354107648, "loss": 1.8041, "step": 202 },
    { "epoch": 0.14, "learning_rate": 0.0005996813031161472, "loss": 1.791, "step": 203 },
    { "epoch": 0.14, "learning_rate": 0.0005995750708215297, "loss": 1.8864, "step": 204 },
    { "epoch": 0.14, "learning_rate": 0.0005994688385269121, "loss": 1.8871, "step": 205 },
    { "epoch": 0.14, "learning_rate": 0.0005993626062322946, "loss": 1.781, "step": 206 },
    { "epoch": 0.14, "learning_rate": 0.000599256373937677, "loss": 1.8129, "step": 207 },
    { "epoch": 0.14, "learning_rate": 0.0005991501416430595, "loss": 1.8208, "step": 208 },
    { "epoch": 0.14, "learning_rate": 0.0005990439093484419, "loss": 1.6943, "step": 209 },
    { "epoch": 0.14, "learning_rate": 0.0005989376770538244, "loss": 1.8093, "step": 210 },
    { "epoch": 0.14, "learning_rate": 0.0005988314447592068, "loss": 1.8158, "step": 211 },
    { "epoch": 0.14, "learning_rate": 0.0005987252124645891, "loss": 1.7879, "step": 212 },
    { "epoch": 0.15, "learning_rate": 0.0005986189801699716, "loss": 1.7495, "step": 213 },
    { "epoch": 0.15, "learning_rate": 0.000598512747875354, "loss": 1.8075, "step": 214 },
    { "epoch": 0.15, "learning_rate": 0.0005984065155807364, "loss": 1.897, "step": 215 },
    { "epoch": 0.15, "learning_rate": 0.0005983002832861189, "loss": 1.7493, "step": 216 },
    { "epoch": 0.15, "learning_rate": 0.0005981940509915014, "loss": 1.7134, "step": 217 },
    { "epoch": 0.15, "learning_rate": 0.0005980878186968838, "loss": 1.7252, "step": 218 },
    { "epoch": 0.15, "learning_rate": 0.0005979815864022663, "loss": 1.8076, "step": 219 },
    { "epoch": 0.15, "learning_rate": 0.0005978753541076487, "loss": 1.7753, "step": 220 },
    { "epoch": 0.15, "learning_rate": 0.0005977691218130311, "loss": 1.7346, "step": 221 },
    { "epoch": 0.15, "learning_rate": 0.0005976628895184136, "loss": 1.7701, "step": 222 },
    { "epoch": 0.15, "learning_rate": 0.000597556657223796, "loss": 1.7343, "step": 223 },
    { "epoch": 0.15, "learning_rate": 0.0005974504249291785, "loss": 1.9214, "step": 224 },
    { "epoch": 0.15, "learning_rate": 0.0005973441926345608, "loss": 1.8475, "step": 225 },
    { "epoch": 0.15, "learning_rate": 0.0005972379603399432, "loss": 1.7976, "step": 226 },
    { "epoch": 0.16, "learning_rate": 0.0005971317280453257, "loss": 1.8312, "step": 227 },
    { "epoch": 0.16, "learning_rate": 0.0005970254957507082, "loss": 1.7942, "step": 228 },
    { "epoch": 0.16, "learning_rate": 0.0005969192634560906, "loss": 1.7478, "step": 229 },
    { "epoch": 0.16, "learning_rate": 0.0005968130311614731, "loss": 1.8265, "step": 230 },
    { "epoch": 0.16, "learning_rate": 0.0005967067988668555, "loss": 1.743, "step": 231 },
    { "epoch": 0.16, "learning_rate": 0.0005966005665722379, "loss": 1.924, "step": 232 },
    { "epoch": 0.16, "learning_rate": 0.0005964943342776204, "loss": 1.8448, "step": 233 },
    { "epoch": 0.16, "learning_rate": 0.0005963881019830028, "loss": 1.8081, "step": 234 },
    { "epoch": 0.16, "learning_rate": 0.0005962818696883852, "loss": 1.7978, "step": 235 },
    { "epoch": 0.16, "learning_rate": 0.0005961756373937677, "loss": 1.9161, "step": 236 },
    { "epoch": 0.16, "learning_rate": 0.00059606940509915, "loss": 1.8836, "step": 237 },
    { "epoch": 0.16, "learning_rate": 0.0005959631728045325, "loss": 1.6924, "step": 238 },
    { "epoch": 0.16, "learning_rate": 0.000595856940509915, "loss": 1.8725, "step": 239 },
    { "epoch": 0.16, "learning_rate": 0.0005957507082152974, "loss": 1.7645, "step": 240 },
    { "epoch": 0.16, "learning_rate": 0.0005956444759206798, "loss": 1.8543, "step": 241 },
    { "epoch": 0.17, "learning_rate": 0.0005955382436260623, "loss": 1.8915, "step": 242 },
    { "epoch": 0.17, "learning_rate": 0.0005954320113314447, "loss": 1.7467, "step": 243 },
    { "epoch": 0.17, "learning_rate": 0.0005953257790368272, "loss": 1.7581, "step": 244 },
    { "epoch": 0.17, "learning_rate": 0.0005952195467422096, "loss": 1.7859, "step": 245 },
    { "epoch": 0.17, "learning_rate": 0.000595113314447592, "loss": 1.7575, "step": 246 },
    { "epoch": 0.17, "learning_rate": 0.0005950070821529745, "loss": 1.7291, "step": 247 },
    { "epoch": 0.17, "learning_rate": 0.0005949008498583569, "loss": 1.692, "step": 248 },
    { "epoch": 0.17, "learning_rate": 0.0005947946175637393, "loss": 1.9056, "step": 249 },
    { "epoch": 0.17, "learning_rate": 0.0005946883852691218, "loss": 1.8572, "step": 250 },
    { "epoch": 0.17, "learning_rate": 0.0005945821529745042, "loss": 1.7458, "step": 251 },
    { "epoch": 0.17, "learning_rate": 0.0005944759206798866, "loss": 1.8021, "step": 252 },
    { "epoch": 0.17, "learning_rate": 0.0005943696883852691, "loss": 1.8534, "step": 253 },
    { "epoch": 0.17, "learning_rate": 0.0005942634560906515, "loss": 1.7671, "step": 254 },
    { "epoch": 0.17, "learning_rate": 0.0005941572237960339, "loss": 1.9101, "step": 255 },
    { "epoch": 0.18, "learning_rate": 0.0005940509915014164, "loss": 1.7567, "step": 256 },
    { "epoch": 0.18, "learning_rate": 0.0005939447592067988, "loss": 1.7771, "step": 257 },
    { "epoch": 0.18, "learning_rate": 0.0005938385269121813, "loss": 1.8691, "step": 258 },
    { "epoch": 0.18, "learning_rate": 0.0005937322946175637, "loss": 1.764, "step": 259 },
    { "epoch": 0.18, "learning_rate": 0.000593626062322946, "loss": 1.7978, "step": 260 },
    { "epoch": 0.18, "learning_rate": 0.0005935198300283285, "loss": 1.8609, "step": 261 },
    { "epoch": 0.18, "learning_rate": 0.000593413597733711, "loss": 1.9074, "step": 262 },
    { "epoch": 0.18, "learning_rate": 0.0005933073654390934, "loss": 1.8182, "step": 263 },
    { "epoch": 0.18, "learning_rate": 0.0005932011331444759, "loss": 1.9095, "step": 264 },
    { "epoch": 0.18, "learning_rate": 0.0005930949008498583, "loss": 1.9103, "step": 265 },
    { "epoch": 0.18, "learning_rate": 0.0005929886685552407, "loss": 1.969, "step": 266 },
    { "epoch": 0.18, "learning_rate": 0.0005928824362606232, "loss": 1.9142, "step": 267 },
    { "epoch": 0.18, "learning_rate": 0.0005927762039660056, "loss": 1.8281, "step": 268 },
    { "epoch": 0.18, "learning_rate": 0.0005926699716713881, "loss": 1.8105, "step": 269 },
    { "epoch": 0.18, "learning_rate": 0.0005925637393767705, "loss": 1.854, "step": 270 },
    { "epoch": 0.19, "learning_rate": 0.0005924575070821529, "loss": 1.7758, "step": 271 },
    { "epoch": 0.19, "learning_rate": 0.0005923512747875354, "loss": 1.7467, "step": 272 },
    { "epoch": 0.19, "learning_rate": 0.0005922450424929179, "loss": 1.8032, "step": 273 },
    { "epoch": 0.19, "learning_rate": 0.0005921388101983002, "loss": 1.7267, "step": 274 },
    { "epoch": 0.19, "learning_rate": 0.0005920325779036826, "loss": 1.7873, "step": 275 },
    { "epoch": 0.19, "learning_rate": 0.0005919263456090651, "loss": 1.8578, "step": 276 },
    { "epoch": 0.19, "learning_rate": 0.0005918201133144475, "loss": 1.8684, "step": 277 },
    { "epoch": 0.19, "learning_rate": 0.00059171388101983, "loss": 1.8266, "step": 278 },
    { "epoch": 0.19, "learning_rate": 0.0005916076487252124, "loss": 1.7916, "step": 279 },
    { "epoch": 0.19, "learning_rate": 0.0005915014164305948, "loss": 1.9168, "step": 280 },
    { "epoch": 0.19, "learning_rate": 0.0005913951841359773, "loss": 1.8837, "step": 281 },
    { "epoch": 0.19, "learning_rate": 0.0005912889518413597, "loss": 1.7875, "step": 282 },
    { "epoch": 0.19, "learning_rate": 0.0005911827195467422, "loss": 1.9046, "step": 283 },
    { "epoch": 0.19, "learning_rate": 0.0005910764872521247, "loss": 1.7552, "step": 284 },
    { "epoch": 0.19, "learning_rate": 0.000590970254957507, "loss": 1.8291, "step": 285 },
    { "epoch": 0.2, "learning_rate": 0.0005908640226628894, "loss": 1.8531, "step": 286 },
    { "epoch": 0.2, "learning_rate": 0.0005907577903682719, "loss": 1.7991, "step": 287 },
    { "epoch": 0.2, "learning_rate": 0.0005906515580736543, "loss": 1.7896, "step": 288 },
    { "epoch": 0.2, "learning_rate": 0.0005905453257790368, "loss": 1.8406, "step": 289 },
    { "epoch": 0.2, "learning_rate": 0.0005904390934844192, "loss": 1.8222, "step": 290 },
    { "epoch": 0.2, "learning_rate": 0.0005903328611898016, "loss": 1.8473, "step": 291 },
    { "epoch": 0.2, "learning_rate": 0.0005902266288951841, "loss": 1.7958, "step": 292 },
    { "epoch": 0.2, "learning_rate": 0.0005901203966005665, "loss": 1.7879, "step": 293 },
    { "epoch": 0.2, "learning_rate": 0.000590014164305949, "loss": 1.8077, "step": 294 },
    { "epoch": 0.2, "learning_rate": 0.0005899079320113314, "loss": 1.7829, "step": 295 },
    { "epoch": 0.2, "learning_rate": 0.0005898016997167139, "loss": 1.8282, "step": 296 },
    { "epoch": 0.2, "learning_rate": 0.0005896954674220963, "loss": 1.8525, "step": 297 },
    { "epoch": 0.2, "learning_rate": 0.0005895892351274787, "loss": 1.8552, "step": 298 },
    { "epoch": 0.2, "learning_rate": 0.0005894830028328611, "loss": 1.9035, "step": 299 },
    { "epoch": 0.21, "learning_rate": 0.0005893767705382435, "loss": 1.8043, "step": 300 },
    { "epoch": 0.21, "learning_rate": 0.000589270538243626, "loss": 1.7884, "step": 301 },
    { "epoch": 0.21, "learning_rate": 0.0005891643059490084, "loss": 1.9804, "step": 302 },
    { "epoch": 0.21, "learning_rate": 0.0005890580736543909, "loss": 1.9464, "step": 303 },
    { "epoch": 0.21, "learning_rate": 0.0005889518413597733, "loss": 1.7684, "step": 304 },
    { "epoch": 0.21, "learning_rate": 0.0005888456090651558, "loss": 1.8019, "step": 305 },
    { "epoch": 0.21, "learning_rate": 0.0005887393767705382, "loss": 1.8127, "step": 306 },
    { "epoch": 0.21, "learning_rate": 0.0005886331444759207, "loss": 1.7938, "step": 307 },
    { "epoch": 0.21, "learning_rate": 0.0005885269121813031, "loss": 1.8452, "step": 308 },
    { "epoch": 0.21, "learning_rate": 0.0005884206798866856, "loss": 1.7761, "step": 309 },
    { "epoch": 0.21, "learning_rate": 0.000588314447592068, "loss": 1.8166, "step": 310 },
    { "epoch": 0.21, "learning_rate": 0.0005882082152974503, "loss": 1.9685, "step": 311 },
    { "epoch": 0.21, "learning_rate": 0.0005881019830028328, "loss": 1.7582, "step": 312 },
    { "epoch": 0.21, "learning_rate": 0.0005879957507082152, "loss": 1.8127, "step": 313 },
    { "epoch": 0.21, "learning_rate": 0.0005878895184135976, "loss": 1.875, "step": 314 },
    { "epoch": 0.22, "learning_rate": 0.0005877832861189801, "loss": 1.7606, "step": 315 },
    { "epoch": 0.22, "learning_rate": 0.0005876770538243626, "loss": 1.8047, "step": 316 },
    { "epoch": 0.22, "learning_rate": 0.000587570821529745, "loss": 1.8821, "step": 317 },
    { "epoch": 0.22, "learning_rate": 0.0005874645892351275, "loss": 1.9359, "step": 318 },
    { "epoch": 0.22, "learning_rate": 0.0005873583569405099, "loss": 1.9346, "step": 319 },
    { "epoch": 0.22, "learning_rate": 0.0005872521246458923, "loss": 1.8984, "step": 320 },
    { "epoch": 0.22, "learning_rate": 0.0005871458923512748, "loss": 1.7759, "step": 321 },
    { "epoch": 0.22, "learning_rate": 0.0005870396600566571, "loss": 1.7961, "step": 322 },
    { "epoch": 0.22, "learning_rate": 0.0005869334277620396, "loss": 1.9148, "step": 323 },
    { "epoch": 0.22, "learning_rate": 0.000586827195467422, "loss": 1.898, "step": 324 },
    { "epoch": 0.22, "learning_rate": 0.0005867209631728044, "loss": 1.7916, "step": 325 },
    { "epoch": 0.22, "learning_rate": 0.0005866147308781869, "loss": 1.8758, "step": 326 },
    { "epoch": 0.22, "learning_rate": 0.0005865084985835694, "loss": 1.8184, "step": 327 },
    { "epoch": 0.22, "learning_rate": 0.0005864022662889518, "loss": 1.7792, "step": 328 },
    { "epoch": 0.22, "learning_rate": 0.0005862960339943343, "loss": 1.8199, "step": 329 },
    { "epoch": 0.23, "learning_rate": 0.0005861898016997167, "loss": 1.8013, "step": 330 },
    { "epoch": 0.23, "learning_rate": 0.0005860835694050991, "loss": 1.8723, "step": 331 },
    { "epoch": 0.23, "learning_rate": 0.0005859773371104816, "loss": 1.7972, "step": 332 },
    { "epoch": 0.23, "learning_rate": 0.000585871104815864, "loss": 1.8765, "step": 333 },
    { "epoch": 0.23, "learning_rate": 0.0005857648725212463, "loss": 1.8028, "step": 334 },
    { "epoch": 0.23, "learning_rate": 0.0005856586402266288, "loss": 1.8377, "step": 335 },
    { "epoch": 0.23, "learning_rate": 0.0005855524079320112, "loss": 1.7413, "step": 336 },
    { "epoch": 0.23, "learning_rate": 0.0005854461756373937, "loss": 1.8906, "step": 337 },
    { "epoch": 0.23, "learning_rate": 0.0005853399433427762, "loss": 1.8192, "step": 338 },
    { "epoch": 0.23, "learning_rate": 0.0005852337110481586, "loss": 1.8595, "step": 339 },
    { "epoch": 0.23, "learning_rate": 0.000585127478753541, "loss": 1.8473, "step": 340 },
    { "epoch": 0.23, "learning_rate": 0.0005850212464589235, "loss": 1.861, "step": 341 },
    { "epoch": 0.23, "learning_rate": 0.0005849150141643059, "loss": 1.8337, "step": 342 },
    { "epoch": 0.23, "learning_rate": 0.0005848087818696884, "loss": 1.8747, "step": 343 },
    { "epoch": 0.24, "learning_rate": 0.0005847025495750708, "loss": 1.8202, "step": 344 },
    { "epoch": 0.24, "learning_rate": 0.0005845963172804532, "loss": 1.8235, "step": 345 },
    { "epoch": 0.24, "learning_rate": 0.0005844900849858357, "loss": 1.915, "step": 346 },
    { "epoch": 0.24, "learning_rate": 0.000584383852691218, "loss": 1.8257, "step": 347 },
    { "epoch": 0.24, "learning_rate": 0.0005842776203966005, "loss": 1.9219, "step": 348 },
    { "epoch": 0.24, "learning_rate": 0.000584171388101983, "loss": 1.8675, "step": 349 },
    { "epoch": 0.24, "learning_rate": 0.0005840651558073654, "loss": 1.8938, "step": 350 },
    { "epoch": 0.24, "learning_rate": 0.0005839589235127478, "loss": 1.7652, "step": 351 },
    { "epoch": 0.24, "learning_rate": 0.0005838526912181303, "loss": 1.9201, "step": 352 },
    { "epoch": 0.24, "learning_rate": 0.0005837464589235127, "loss": 1.8092, "step": 353 },
    { "epoch": 0.24, "learning_rate": 0.0005836402266288951, "loss": 1.7251, "step": 354 },
    { "epoch": 0.24, "learning_rate": 0.0005835339943342776, "loss": 1.8227, "step": 355 },
    { "epoch": 0.24, "learning_rate": 0.00058342776203966, "loss": 1.7901, "step": 356 },
    { "epoch": 0.24, "learning_rate": 0.0005833215297450425, "loss": 1.7998, "step": 357 },
    { "epoch": 0.24, "learning_rate": 0.0005832152974504249, "loss": 1.8229, "step": 358 },
    { "epoch": 0.25, "learning_rate": 0.0005831090651558072, "loss": 1.8179, "step": 359 },
    { "epoch": 0.25, "learning_rate": 0.0005830028328611897, "loss": 1.8142, "step": 360 },
    { "epoch": 0.25, "learning_rate": 0.0005828966005665722, "loss": 1.7447, "step": 361 },
    { "epoch": 0.25, "learning_rate": 0.0005827903682719546, "loss": 1.7978, "step": 362 },
    { "epoch": 0.25, "learning_rate": 0.0005826841359773371, "loss": 1.7471, "step": 363 },
    { "epoch": 0.25, "learning_rate": 0.0005825779036827195, "loss": 1.9006, "step": 364 },
    { "epoch": 0.25, "learning_rate": 0.0005824716713881019, "loss": 1.8023, "step": 365 },
    { "epoch": 0.25, "learning_rate": 0.0005823654390934844, "loss": 1.7522, "step": 366 },
    { "epoch": 0.25, "learning_rate": 0.0005822592067988668, "loss": 1.8935, "step": 367 },
    { "epoch": 0.25, "learning_rate": 0.0005821529745042493, "loss": 1.8137, "step": 368 },
    { "epoch": 0.25, "learning_rate": 0.0005820467422096317, "loss": 1.8023, "step": 369 },
    { "epoch": 0.25, "learning_rate": 0.000581940509915014, "loss": 1.8378, "step": 370 },
    { "epoch": 0.25, "learning_rate": 0.0005818342776203965, "loss": 1.7871, "step": 371 },
    { "epoch": 0.25, "learning_rate": 0.000581728045325779, "loss": 1.793, "step": 372 },
    { "epoch": 0.26, "learning_rate": 0.0005816218130311614, "loss": 1.8656, "step": 373 },
    { "epoch": 0.26, "learning_rate": 0.0005815155807365438, "loss": 1.8213, "step": 374 },
    { "epoch": 0.26, "learning_rate": 0.0005814093484419263, "loss": 1.8686, "step": 375 },
    { "epoch": 0.26, "learning_rate": 0.0005813031161473087, "loss": 1.7107, "step": 376 },
    { "epoch": 0.26, "learning_rate": 0.0005811968838526912, "loss": 1.8303, "step": 377 },
    { "epoch": 0.26, "learning_rate": 0.0005810906515580736, "loss": 1.8433, "step": 378 },
    { "epoch": 0.26, "learning_rate": 0.000580984419263456, "loss": 1.8635, "step": 379 },
    { "epoch": 0.26, "learning_rate": 0.0005808781869688385, "loss": 1.8097, "step": 380 },
    { "epoch": 0.26, "learning_rate": 0.0005807719546742209, "loss": 1.8751, "step": 381 },
    { "epoch": 0.26, "learning_rate": 0.0005806657223796034, "loss": 1.8784, "step": 382 },
    { "epoch": 0.26, "learning_rate": 0.0005805594900849859, "loss": 1.8826, "step": 383 },
    { "epoch": 0.26, "learning_rate": 0.0005804532577903682, "loss": 1.7963, "step": 384 },
    { "epoch": 0.26, "learning_rate": 0.0005803470254957506, "loss": 1.8452, "step": 385 },
    { "epoch": 0.26, "learning_rate": 0.0005802407932011331, "loss": 1.7549, "step": 386 },
    { "epoch": 0.26, "learning_rate": 0.0005801345609065155, "loss": 1.851, "step": 387 },
    { "epoch": 0.27, "learning_rate": 0.000580028328611898, "loss": 1.8198, "step": 388 },
    { "epoch": 0.27, "learning_rate": 0.0005799220963172804, "loss": 1.956, "step": 389 },
    { "epoch": 0.27, "learning_rate": 0.0005798158640226628, "loss": 1.9291, "step": 390 },
    { "epoch": 0.27, "learning_rate": 0.0005797096317280453, "loss": 1.784, "step": 391 },
    { "epoch": 0.27, "learning_rate": 0.0005796033994334277, "loss": 1.894, "step": 392 },
    { "epoch": 0.27, "learning_rate": 0.0005794971671388101, "loss": 1.886, "step": 393 },
    { "epoch": 0.27, "learning_rate": 0.0005793909348441927, "loss": 1.826, "step": 394 },
    { "epoch": 0.27, "learning_rate": 0.000579284702549575, "loss": 1.8398, "step": 395 },
    { "epoch": 0.27, "learning_rate": 0.0005791784702549574, "loss": 1.7506, "step": 396 },
    { "epoch": 0.27, "learning_rate": 0.0005790722379603399, "loss": 1.8669, "step": 397 },
    { "epoch": 0.27, "learning_rate": 0.0005789660056657223, "loss": 1.8566, "step": 398 },
    { "epoch": 0.27, "learning_rate": 0.0005788597733711047, "loss": 1.8618, "step": 399 },
    { "epoch": 0.27, "learning_rate": 0.0005787535410764872, "loss": 1.8632, "step": 400 },
    { "epoch": 0.27, "eval_loss": 2.021120071411133, "eval_runtime": 1471.972, "eval_samples_per_second": 9.879, "eval_steps_per_second": 9.879, "step": 400 },
    { "epoch": 0.27, "learning_rate": 0.0005786473087818696, "loss": 1.9149, "step": 401 },
    { "epoch": 0.27, "learning_rate": 0.0005785410764872521, "loss": 1.9261, "step": 402 },
    { "epoch": 0.28, "learning_rate": 0.0005784348441926345, "loss": 1.9515, "step": 403 },
    { "epoch": 0.28, "learning_rate": 0.0005783286118980169, "loss": 1.8682, "step": 404 },
    { "epoch": 0.28, "learning_rate": 0.0005782223796033994, "loss": 1.8787, "step": 405 },
    { "epoch": 0.28, "learning_rate": 0.0005781161473087819, "loss": 1.8882, "step": 406 },
    { "epoch": 0.28, "learning_rate": 0.0005780099150141643, "loss": 1.8623, "step": 407 },
    { "epoch": 0.28, "learning_rate": 0.0005779036827195467, "loss": 1.9533, "step": 408 },
    { "epoch": 0.28, "learning_rate": 0.0005777974504249291, "loss": 1.8139, "step": 409 },
    { "epoch": 0.28, "learning_rate": 0.0005776912181303115, "loss": 1.9436, "step": 410 },
    { "epoch": 0.28, "learning_rate": 0.000577584985835694, "loss": 2.012, "step": 411 },
    { "epoch": 0.28, "learning_rate": 0.0005774787535410764, "loss": 1.8593, "step": 412 },
    { "epoch": 0.28, "learning_rate": 0.0005773725212464589, "loss": 1.9534, "step": 413 },
    { "epoch": 0.28, "learning_rate": 0.0005772662889518413, "loss": 1.8901, "step": 414 },
    { "epoch": 0.28, "learning_rate": 0.0005771600566572237, "loss": 1.9311, "step": 415 },
    { "epoch": 0.28, "learning_rate": 0.0005770538243626062, "loss": 1.9357, "step": 416 },
    { "epoch": 0.29, "learning_rate": 0.0005769475920679887, "loss": 2.0402, "step": 417 },
    { "epoch": 0.29, "learning_rate": 0.0005768413597733711, "loss": 1.9272, "step": 418 },
    { "epoch": 0.29, "learning_rate": 0.0005767351274787535, "loss": 1.8699, "step": 419 },
    { "epoch": 0.29, "learning_rate": 0.000576628895184136, "loss": 1.9146, "step": 420 },
    { "epoch": 0.29, "learning_rate": 0.0005765226628895183, "loss": 1.8632, "step": 421 },
    { "epoch": 0.29, "learning_rate": 0.0005764164305949008, "loss": 1.9577, "step": 422 },
    { "epoch": 0.29, "learning_rate": 0.0005763101983002832, "loss": 1.9124, "step": 423 },
    { "epoch": 0.29, "learning_rate": 0.0005762039660056656, "loss": 1.8922, "step": 424 },
    { "epoch": 0.29, "learning_rate": 0.0005760977337110481, "loss": 1.8515, "step": 425 },
    { "epoch": 0.29, "learning_rate": 0.0005759915014164305, "loss": 1.9232, "step": 426 },
    { "epoch": 0.29, "learning_rate": 0.000575885269121813, "loss": 1.9402, "step": 427 },
    { "epoch": 0.29, "learning_rate": 0.0005757790368271955, "loss": 1.885, "step": 428 },
    { "epoch": 0.29, "learning_rate": 0.0005756728045325779, "loss": 1.8847, "step": 429 },
    { "epoch": 0.29, "learning_rate": 0.0005755665722379603, "loss": 1.8776, "step": 430 },
    { "epoch": 0.29, "learning_rate": 0.0005754603399433428, "loss": 1.9292, "step": 431 },
    { "epoch": 0.3, "learning_rate": 0.0005753541076487251, "loss": 2.037, "step": 432 },
    { "epoch": 0.3, "learning_rate": 0.0005752478753541076, "loss": 1.9526, "step": 433 },
    { "epoch": 0.3, "learning_rate": 0.00057514164305949, "loss": 1.8857, "step": 434 },
    { "epoch": 0.3, "learning_rate": 0.0005750354107648724, "loss": 1.8571, "step": 435 },
    { "epoch": 0.3, "learning_rate": 0.0005749291784702549, "loss": 1.99, "step": 436 },
    { "epoch": 0.3, "learning_rate": 0.0005748229461756373, "loss": 1.9094, "step": 437 },
    { "epoch": 0.3, "learning_rate": 0.0005747167138810198, "loss": 1.8634, "step": 438 },
    { "epoch": 0.3, "learning_rate": 0.0005746104815864022, "loss": 1.85, "step": 439 },
    { "epoch": 0.3, "learning_rate": 0.0005745042492917847, "loss": 1.9329, "step": 440 },
    { "epoch": 0.3, "learning_rate": 0.0005743980169971671, "loss": 1.9566, "step": 441 },
    { "epoch": 0.3, "learning_rate": 0.0005742917847025496, "loss": 1.9756, "step": 442 },
    { "epoch": 0.3, "learning_rate": 0.000574185552407932, "loss": 1.9327, "step": 443 },
    { "epoch": 0.3, "learning_rate": 0.0005740793201133143, "loss": 1.8133, "step": 444 },
    { "epoch": 0.3, "learning_rate": 0.0005739730878186968, "loss": 1.9292, "step": 445 },
    { "epoch": 0.3, "learning_rate": 0.0005738668555240792, "loss": 1.8839, "step": 446 },
    { "epoch": 0.31, "learning_rate": 0.0005737606232294617, "loss": 1.9216, "step": 447 },
    { "epoch": 0.31, "learning_rate": 0.0005736543909348441, "loss": 1.9303, "step": 448 },
    { "epoch": 0.31, "learning_rate": 0.0005735481586402266, "loss": 2.0129, "step": 449 },
    { "epoch": 0.31, "learning_rate": 0.000573441926345609, "loss": 1.9233, "step": 450 },
    { "epoch": 0.31, "learning_rate": 0.0005733356940509915, "loss": 1.809, "step": 451 },
    { "epoch": 0.31, "learning_rate": 0.0005732294617563739, "loss": 1.822, "step": 452 },
    { "epoch": 0.31, "learning_rate": 0.0005731232294617564, "loss": 1.829, "step": 453 },
    { "epoch": 0.31, "learning_rate": 0.0005730169971671388, "loss": 1.9524, "step": 454 },
    { "epoch": 0.31, "learning_rate": 0.0005729107648725212, "loss": 1.8695, "step": 455 },
    { "epoch": 0.31, "learning_rate": 0.0005728045325779037, "loss": 1.8614, "step": 456 },
    { "epoch": 0.31, "learning_rate": 0.000572698300283286, "loss": 1.8697, "step": 457 },
    { "epoch": 0.31, "learning_rate": 0.0005725920679886684, "loss": 1.8378, "step": 458 },
    { "epoch": 0.31, "learning_rate": 0.0005724858356940509, "loss": 1.9554, "step": 459 },
    { "epoch": 0.31, "learning_rate": 0.0005723796033994334, "loss": 1.9066, "step": 460 },
    { "epoch": 0.32, "learning_rate": 0.0005722733711048158, "loss": 1.8836, "step": 461 },
    { "epoch": 0.32, "learning_rate": 0.0005721671388101983, "loss": 1.9541, "step": 462 },
    { "epoch": 0.32, "learning_rate": 0.0005720609065155807, "loss": 1.8879, "step": 463 },
    { "epoch": 0.32, "learning_rate": 0.0005719546742209631, "loss": 1.8126, "step": 464 },
    { "epoch": 0.32, "learning_rate": 0.0005718484419263456, "loss": 1.9861, "step": 465 },
    { "epoch": 0.32, "learning_rate": 0.000571742209631728, "loss": 2.0253, "step": 466 },
    { "epoch": 0.32, "learning_rate": 0.0005716359773371105, "loss": 1.9213, "step": 467 },
    { "epoch": 0.32, "learning_rate": 0.0005715297450424929, "loss": 1.9988, "step": 468 },
    { "epoch": 0.32, "learning_rate": 0.0005714235127478752, "loss": 1.9285, "step": 469 },
    { "epoch": 0.32, "learning_rate": 0.0005713172804532577, "loss": 1.9973, "step": 470 },
    { "epoch": 0.32, "learning_rate": 0.0005712110481586402, "loss": 1.945, "step": 471 },
    { "epoch": 0.32, "learning_rate": 0.0005711048158640226, "loss": 1.9327, "step": 472 },
    { "epoch": 0.32, "learning_rate": 0.0005709985835694051, "loss": 2.0014, "step": 473 },
    { "epoch": 0.32, "learning_rate": 0.0005708923512747875, "loss": 1.8221, "step": 474 },
    { "epoch": 0.32, "learning_rate": 0.0005707861189801699, "loss": 1.8905, "step": 475 },
    { "epoch": 0.33, "learning_rate": 0.0005706798866855524, "loss": 1.9236, "step": 476 },
    { "epoch": 0.33, "learning_rate": 0.0005705736543909348, "loss": 1.8436, "step": 477 },
    { "epoch": 0.33, "learning_rate": 0.0005704674220963172, "loss": 1.8545, "step": 478 },
    { "epoch": 0.33, "learning_rate": 0.0005703611898016997, "loss": 1.8886, "step": 479 },
    { "epoch": 0.33, "learning_rate": 0.000570254957507082, "loss": 1.873, "step": 480 },
    { "epoch": 0.33, "learning_rate": 0.0005701487252124645, "loss": 1.8624, "step": 481 },
    { "epoch": 0.33, "learning_rate": 0.000570042492917847, "loss": 1.8614, "step": 482 },
    { "epoch": 0.33, "learning_rate": 0.0005699362606232294, "loss": 1.9153, "step": 483 },
    { "epoch": 0.33, "learning_rate": 0.0005698300283286118, "loss": 1.8524, "step": 484 },
    { "epoch": 0.33, "learning_rate": 0.0005697237960339943, "loss": 1.9039, "step": 485 },
    { "epoch": 0.33, "learning_rate": 0.0005696175637393767, "loss": 1.8866, "step": 486 },
    { "epoch": 0.33, "learning_rate": 0.0005695113314447592, "loss": 1.7717, "step": 487 },
    { "epoch": 0.33, "learning_rate": 0.0005694050991501416, "loss": 1.9695, "step": 488 },
    { "epoch": 0.33, "learning_rate": 0.000569298866855524, "loss": 1.9931, "step": 489 },
    { "epoch": 0.34, "learning_rate": 0.0005691926345609065, "loss": 1.8473, "step": 490 },
    { "epoch": 0.34, "learning_rate": 0.0005690864022662889, "loss": 1.8366, "step": 491 },
    { "epoch": 0.34, "learning_rate": 0.0005689801699716714, "loss": 1.9242, "step": 492 },
    { "epoch": 0.34, "learning_rate": 0.0005688739376770539, "loss": 1.9081, "step": 493 },
    { "epoch": 0.34, "learning_rate": 0.0005687677053824362, "loss": 1.8658, "step": 494 },
    { "epoch": 0.34, "learning_rate": 0.0005686614730878186, "loss": 1.9603, "step": 495 },
    { "epoch": 0.34, "learning_rate": 0.0005685552407932011, "loss": 1.8292, "step": 496 },
    { "epoch": 0.34, "learning_rate": 0.0005684490084985835, "loss": 1.8917, "step": 497 },
    { "epoch": 0.34, "learning_rate": 0.0005683427762039659, "loss": 1.9171, "step": 498 },
    { "epoch": 0.34, "learning_rate": 0.0005682365439093484, "loss": 1.9373, "step": 499 },
    { "epoch": 0.34, "learning_rate": 0.0005681303116147308, "loss": 1.9529, "step": 500 },
    { "epoch": 0.34, "learning_rate": 0.0005680240793201133, "loss": 1.9459, "step": 501 },
    { "epoch": 0.34, "learning_rate": 0.0005679178470254957, "loss": 1.9941, "step": 502 },
    { "epoch": 0.34, "learning_rate": 0.0005678116147308781, "loss": 1.8738, "step": 503 },
    { "epoch": 0.34, "learning_rate": 0.0005677053824362606, "loss": 1.9666, "step": 504 },
    { "epoch": 0.35, "learning_rate": 0.000567599150141643, "loss": 2.0352, "step": 505 },
    { "epoch": 0.35, "learning_rate": 0.0005674929178470254, "loss": 1.8621, "step": 506 },
    { "epoch": 0.35, "learning_rate": 0.0005673866855524079, "loss": 1.8733, "step": 507 },
    { "epoch": 0.35, "learning_rate": 0.0005672804532577903, "loss": 1.9414, "step": 508 },
    { "epoch": 0.35, "learning_rate": 0.0005671742209631727, "loss": 1.9515, "step": 509 },
    { "epoch": 0.35, "learning_rate": 0.0005670679886685552, "loss": 1.891, "step": 510 },
    { "epoch": 0.35, "learning_rate": 0.0005669617563739376, "loss": 2.0231, "step": 511 },
    { "epoch": 0.35, "learning_rate": 0.0005668555240793201, "loss": 1.9534, "step": 512 },
    { "epoch": 0.35, "learning_rate": 0.0005667492917847025, "loss": 1.9437, "step": 513 },
    { "epoch": 0.35, "learning_rate": 0.0005666430594900849, "loss": 1.8914, "step": 514 },
    { "epoch": 0.35, "learning_rate": 0.0005665368271954674, "loss": 1.8711, "step": 515 },
    { "epoch": 0.35, "learning_rate": 0.0005664305949008499, "loss": 1.9708, "step": 516 },
    { "epoch": 0.35, "learning_rate": 0.0005663243626062323, "loss": 1.9462, "step": 517 },
    { "epoch": 0.35, "learning_rate": 0.0005662181303116146, "loss": 1.8547, "step": 518 },
    { "epoch": 0.35, "learning_rate": 0.0005661118980169971, "loss": 1.842, "step": 519 },
    { "epoch": 0.36, "learning_rate": 0.0005660056657223795, "loss": 1.8544, "step": 520 },
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000565899433427762, |
|
"loss": 1.8744, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005657932011331444, |
|
"loss": 1.9211, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005656869688385268, |
|
"loss": 1.9399, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005655807365439093, |
|
"loss": 1.9049, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005654745042492917, |
|
"loss": 1.9623, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005653682719546742, |
|
"loss": 1.9142, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005652620396600567, |
|
"loss": 1.8899, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005651558073654391, |
|
"loss": 1.8878, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005650495750708215, |
|
"loss": 1.924, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000564943342776204, |
|
"loss": 1.9087, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005648371104815863, |
|
"loss": 1.9945, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005647308781869688, |
|
"loss": 1.8945, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0005646246458923512, |
|
"loss": 1.9209, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005645184135977336, |
|
"loss": 1.9301, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005644121813031161, |
|
"loss": 1.8516, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005643059490084985, |
|
"loss": 1.906, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000564199716713881, |
|
"loss": 1.91, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005640934844192634, |
|
"loss": 1.9069, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005639872521246459, |
|
"loss": 1.7911, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005638810198300283, |
|
"loss": 1.8925, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005637747875354108, |
|
"loss": 1.9526, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005636685552407931, |
|
"loss": 1.9279, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005635623229461755, |
|
"loss": 1.8829, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000563456090651558, |
|
"loss": 1.9501, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005633498583569404, |
|
"loss": 1.9602, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005632436260623229, |
|
"loss": 1.9497, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005631373937677053, |
|
"loss": 1.84, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0005630311614730878, |
|
"loss": 1.9337, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005629249291784702, |
|
"loss": 1.8709, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005628186968838527, |
|
"loss": 1.9231, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005627124645892351, |
|
"loss": 1.9035, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005626062322946176, |
|
"loss": 1.8365, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005625, |
|
"loss": 2.1028, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005623937677053823, |
|
"loss": 1.8692, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005622875354107648, |
|
"loss": 1.9452, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005621813031161472, |
|
"loss": 1.883, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005620750708215296, |
|
"loss": 1.8987, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005619688385269121, |
|
"loss": 1.9188, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005618626062322946, |
|
"loss": 1.9373, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000561756373937677, |
|
"loss": 1.9167, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005616501416430595, |
|
"loss": 1.8628, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005615439093484419, |
|
"loss": 1.9258, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0005614376770538243, |
|
"loss": 1.9783, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005613314447592068, |
|
"loss": 1.9738, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005612252124645892, |
|
"loss": 1.9351, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005611189801699717, |
|
"loss": 1.9109, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000561012747875354, |
|
"loss": 1.832, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005609065155807364, |
|
"loss": 1.9258, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005608002832861189, |
|
"loss": 1.9888, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005606940509915014, |
|
"loss": 1.9195, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005605878186968838, |
|
"loss": 1.8607, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005604815864022663, |
|
"loss": 1.9868, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005603753541076487, |
|
"loss": 1.885, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005602691218130311, |
|
"loss": 1.9379, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005601628895184136, |
|
"loss": 1.8685, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000560056657223796, |
|
"loss": 1.9636, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0005599504249291784, |
|
"loss": 1.9925, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005598441926345609, |
|
"loss": 1.9332, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005597379603399432, |
|
"loss": 1.8458, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005596317280453257, |
|
"loss": 1.8204, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005595254957507082, |
|
"loss": 1.8656, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005594192634560906, |
|
"loss": 1.9399, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000559313031161473, |
|
"loss": 1.891, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005592067988668555, |
|
"loss": 1.8595, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005591005665722379, |
|
"loss": 1.8989, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005589943342776204, |
|
"loss": 1.8275, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005588881019830028, |
|
"loss": 1.8716, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005587818696883852, |
|
"loss": 1.933, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005586756373937677, |
|
"loss": 1.982, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00055856940509915, |
|
"loss": 1.8773, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0005584631728045325, |
|
"loss": 1.8793, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000558356940509915, |
|
"loss": 1.9778, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005582507082152974, |
|
"loss": 1.9904, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005581444759206798, |
|
"loss": 1.9301, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005580382436260623, |
|
"loss": 1.9367, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005579320113314447, |
|
"loss": 1.9853, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005578257790368272, |
|
"loss": 1.9987, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005577195467422096, |
|
"loss": 1.88, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000557613314447592, |
|
"loss": 1.9391, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005575070821529745, |
|
"loss": 2.0134, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005574008498583569, |
|
"loss": 1.8383, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005572946175637392, |
|
"loss": 1.8189, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005571883852691217, |
|
"loss": 1.9324, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005570821529745042, |
|
"loss": 1.8702, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005569759206798866, |
|
"loss": 1.8803, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005568696883852691, |
|
"loss": 1.8409, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005567634560906515, |
|
"loss": 1.8715, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005566572237960339, |
|
"loss": 1.9359, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005565509915014164, |
|
"loss": 1.922, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005564447592067988, |
|
"loss": 1.8341, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005563385269121813, |
|
"loss": 1.9494, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005562322946175637, |
|
"loss": 1.9585, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005561260623229461, |
|
"loss": 1.8668, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005560198300283286, |
|
"loss": 1.9067, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000555913597733711, |
|
"loss": 1.8475, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005558073654390934, |
|
"loss": 1.9009, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005557011331444759, |
|
"loss": 1.941, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005555949008498583, |
|
"loss": 1.9756, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005554886685552407, |
|
"loss": 1.9568, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005553824362606232, |
|
"loss": 1.8794, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005552762039660056, |
|
"loss": 1.9152, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.000555169971671388, |
|
"loss": 2.0171, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005550637393767705, |
|
"loss": 1.9388, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005549575070821529, |
|
"loss": 1.9048, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005548512747875354, |
|
"loss": 1.9667, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005547450424929179, |
|
"loss": 1.9878, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005546388101983003, |
|
"loss": 1.9145, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005545325779036826, |
|
"loss": 1.9873, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005544263456090651, |
|
"loss": 2.015, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005543201133144475, |
|
"loss": 1.9204, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00055421388101983, |
|
"loss": 1.8899, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005541076487252124, |
|
"loss": 1.9057, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005540014164305948, |
|
"loss": 1.9663, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005538951841359773, |
|
"loss": 1.9447, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005537889518413597, |
|
"loss": 1.957, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005536827195467422, |
|
"loss": 1.8772, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005535764872521247, |
|
"loss": 1.9601, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005534702549575071, |
|
"loss": 1.8354, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005533640226628895, |
|
"loss": 1.9107, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000553257790368272, |
|
"loss": 1.9511, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005531515580736543, |
|
"loss": 1.9354, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005530453257790367, |
|
"loss": 1.9189, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005529390934844192, |
|
"loss": 1.803, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005528328611898016, |
|
"loss": 1.9278, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005527266288951841, |
|
"loss": 1.8629, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005526203966005665, |
|
"loss": 1.9348, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000552514164305949, |
|
"loss": 1.9472, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005524079320113314, |
|
"loss": 1.9808, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005523016997167139, |
|
"loss": 1.8421, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0005521954674220963, |
|
"loss": 1.9773, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005520892351274788, |
|
"loss": 1.9566, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005519830028328611, |
|
"loss": 1.9461, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005518767705382435, |
|
"loss": 1.9117, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000551770538243626, |
|
"loss": 1.8791, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005516643059490084, |
|
"loss": 1.9065, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005515580736543909, |
|
"loss": 1.9535, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005514518413597733, |
|
"loss": 1.8854, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005513456090651558, |
|
"loss": 2.023, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005512393767705382, |
|
"loss": 2.0061, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005511331444759207, |
|
"loss": 1.8768, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005510269121813031, |
|
"loss": 1.948, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005509206798866855, |
|
"loss": 1.9478, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000550814447592068, |
|
"loss": 1.977, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005507082152974503, |
|
"loss": 1.9208, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0005506019830028328, |
|
"loss": 1.9287, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005504957507082152, |
|
"loss": 1.8817, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005503895184135976, |
|
"loss": 1.9567, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005502832861189801, |
|
"loss": 1.8139, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005501770538243626, |
|
"loss": 1.913, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.000550070821529745, |
|
"loss": 1.9434, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005499645892351275, |
|
"loss": 1.9377, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005498583569405099, |
|
"loss": 1.8781, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005497521246458923, |
|
"loss": 1.8837, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005496458923512748, |
|
"loss": 1.9478, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005495396600566572, |
|
"loss": 1.8756, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005494334277620397, |
|
"loss": 1.9507, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.000549327195467422, |
|
"loss": 1.9027, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005492209631728044, |
|
"loss": 1.9749, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005491147308781869, |
|
"loss": 1.8564, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0005490084985835694, |
|
"loss": 1.9649, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005489022662889518, |
|
"loss": 2.0128, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005487960339943342, |
|
"loss": 1.9041, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005486898016997167, |
|
"loss": 2.0121, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005485835694050991, |
|
"loss": 1.9203, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005484773371104816, |
|
"loss": 1.9, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000548371104815864, |
|
"loss": 1.9857, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005482648725212464, |
|
"loss": 2.0838, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005481586402266289, |
|
"loss": 1.8973, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005480524079320112, |
|
"loss": 1.9636, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005479461756373937, |
|
"loss": 1.9726, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005478399433427762, |
|
"loss": 1.9062, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005477337110481586, |
|
"loss": 1.8709, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000547627478753541, |
|
"loss": 1.9234, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0005475212464589235, |
|
"loss": 1.8859, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005474150141643059, |
|
"loss": 1.9746, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005473087818696884, |
|
"loss": 1.9229, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005472025495750708, |
|
"loss": 1.8827, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005470963172804532, |
|
"loss": 1.9455, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005469900849858357, |
|
"loss": 1.8779, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.000546883852691218, |
|
"loss": 1.9151, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005467776203966004, |
|
"loss": 1.8283, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005466713881019829, |
|
"loss": 1.9657, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005465651558073654, |
|
"loss": 2.0174, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005464589235127478, |
|
"loss": 1.9462, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005463526912181303, |
|
"loss": 1.9257, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005462464589235127, |
|
"loss": 1.8828, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005461402266288951, |
|
"loss": 1.8831, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0005460339943342776, |
|
"loss": 1.9266, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00054592776203966, |
|
"loss": 2.0017, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005458215297450425, |
|
"loss": 1.9537, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005457152974504249, |
|
"loss": 1.9141, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005456090651558072, |
|
"loss": 1.9393, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005455028328611897, |
|
"loss": 1.8896, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005453966005665722, |
|
"loss": 1.8186, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005452903682719546, |
|
"loss": 1.9527, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005451841359773371, |
|
"loss": 2.0319, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005450779036827195, |
|
"loss": 1.8668, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005449716713881019, |
|
"loss": 1.9775, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005448654390934844, |
|
"loss": 1.8624, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005447592067988668, |
|
"loss": 1.8725, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005446529745042492, |
|
"loss": 1.8682, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005445467422096317, |
|
"loss": 1.9078, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005444405099150141, |
|
"loss": 1.8206, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0005443342776203966, |
|
"loss": 1.9923, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000544228045325779, |
|
"loss": 1.9765, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005441218130311614, |
|
"loss": 1.9154, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005440155807365438, |
|
"loss": 1.8111, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005439093484419263, |
|
"loss": 1.9149, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005438031161473087, |
|
"loss": 1.927, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005436968838526912, |
|
"loss": 1.855, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005435906515580736, |
|
"loss": 1.8842, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000543484419263456, |
|
"loss": 1.955, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005433781869688385, |
|
"loss": 1.9599, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005432719546742209, |
|
"loss": 1.8462, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005431657223796034, |
|
"loss": 1.7453, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005430594900849859, |
|
"loss": 1.8518, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005429532577903683, |
|
"loss": 1.7791, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0005428470254957506, |
|
"loss": 1.918, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005427407932011331, |
|
"loss": 1.8775, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005426345609065155, |
|
"loss": 1.9502, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005425283286118979, |
|
"loss": 1.9256, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005424220963172804, |
|
"loss": 1.9275, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005423158640226628, |
|
"loss": 1.9383, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005422096317280453, |
|
"loss": 1.8964, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005421033994334277, |
|
"loss": 1.9313, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005419971671388101, |
|
"loss": 1.8931, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005418909348441926, |
|
"loss": 1.9748, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005417847025495751, |
|
"loss": 1.985, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005416784702549575, |
|
"loss": 1.8515, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00054157223796034, |
|
"loss": 1.8717, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005414660056657223, |
|
"loss": 1.9044, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005413597733711047, |
|
"loss": 1.9972, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0005412535410764872, |
|
"loss": 1.8846, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005411473087818696, |
|
"loss": 1.8465, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005410410764872521, |
|
"loss": 1.9254, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005409348441926345, |
|
"loss": 1.89, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005408286118980169, |
|
"loss": 1.9923, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005407223796033994, |
|
"loss": 1.8325, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005406161473087819, |
|
"loss": 2.023, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005405099150141643, |
|
"loss": 1.8279, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005404036827195467, |
|
"loss": 1.9296, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005402974504249291, |
|
"loss": 1.8728, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005401912181303115, |
|
"loss": 1.9336, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.000540084985835694, |
|
"loss": 1.8554, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005399787535410764, |
|
"loss": 1.9388, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005398725212464588, |
|
"loss": 1.9101, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0005397662889518413, |
|
"loss": 1.866, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005396600566572237, |
|
"loss": 1.9731, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005395538243626062, |
|
"loss": 1.9263, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005394475920679887, |
|
"loss": 1.9337, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005393413597733711, |
|
"loss": 1.896, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005392351274787535, |
|
"loss": 1.9376, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.000539128895184136, |
|
"loss": 1.9601, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005390226628895183, |
|
"loss": 1.9177, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005389164305949008, |
|
"loss": 1.9044, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005388101983002832, |
|
"loss": 1.9378, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005387039660056656, |
|
"loss": 1.8153, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005385977337110481, |
|
"loss": 1.9445, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005384915014164305, |
|
"loss": 1.8467, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.000538385269121813, |
|
"loss": 1.8743, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005382790368271955, |
|
"loss": 1.8839, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0005381728045325779, |
|
"loss": 1.8023, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005380665722379603, |
|
"loss": 1.9814, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005379603399433428, |
|
"loss": 1.8621, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005378541076487252, |
|
"loss": 1.9371, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005377478753541075, |
|
"loss": 1.9692, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00053764164305949, |
|
"loss": 1.9392, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005375354107648724, |
|
"loss": 2.0, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005374291784702549, |
|
"loss": 1.9281, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005373229461756373, |
|
"loss": 1.9512, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005372167138810198, |
|
"loss": 1.9222, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005371104815864022, |
|
"loss": 1.9698, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005370042492917847, |
|
"loss": 1.8392, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005368980169971671, |
|
"loss": 1.9714, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005367917847025496, |
|
"loss": 1.8521, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.000536685552407932, |
|
"loss": 1.9495, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0005365793201133144, |
|
"loss": 1.9401, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005364730878186969, |
|
"loss": 1.8449, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005363668555240792, |
|
"loss": 1.9425, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005362606232294617, |
|
"loss": 1.8826, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"eval_loss": 1.9985228776931763, |
|
"eval_runtime": 1468.32, |
|
"eval_samples_per_second": 9.904, |
|
"eval_steps_per_second": 9.904, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005361543909348441, |
|
"loss": 1.9264, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005360481586402266, |
|
"loss": 1.8821, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000535941926345609, |
|
"loss": 2.0427, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005358356940509915, |
|
"loss": 1.8675, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005357294617563739, |
|
"loss": 1.9315, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005356232294617563, |
|
"loss": 2.0091, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005355169971671388, |
|
"loss": 1.9447, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005354107648725212, |
|
"loss": 1.9089, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005353045325779037, |
|
"loss": 1.8641, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000535198300283286, |
|
"loss": 1.858, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0005350920679886684, |
|
"loss": 1.9707, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005349858356940509, |
|
"loss": 1.9646, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005348796033994334, |
|
"loss": 1.8502, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005347733711048158, |
|
"loss": 1.9738, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005346671388101983, |
|
"loss": 1.9471, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005345609065155807, |
|
"loss": 1.8659, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005344546742209631, |
|
"loss": 1.8747, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005343484419263456, |
|
"loss": 1.8295, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.000534242209631728, |
|
"loss": 1.906, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005341359773371105, |
|
"loss": 1.9628, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005340297450424929, |
|
"loss": 1.8916, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005339235127478752, |
|
"loss": 1.9851, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005338172804532577, |
|
"loss": 1.9781, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005337110481586402, |
|
"loss": 1.8828, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0005336048158640226, |
|
"loss": 1.9323, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.000533498583569405, |
|
"loss": 1.8749, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005333923512747875, |
|
"loss": 1.9325, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005332861189801699, |
|
"loss": 1.8826, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005331798866855524, |
|
"loss": 1.9116, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005330736543909348, |
|
"loss": 1.8078, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005329674220963172, |
|
"loss": 1.8018, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005328611898016997, |
|
"loss": 1.8972, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005327549575070821, |
|
"loss": 1.88, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005326487252124646, |
|
"loss": 1.8848, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.000532542492917847, |
|
"loss": 1.9582, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005324362606232294, |
|
"loss": 1.9269, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005323300283286118, |
|
"loss": 2.041, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005322237960339943, |
|
"loss": 2.0065, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005321175637393767, |
|
"loss": 1.8516, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005320113314447592, |
|
"loss": 1.913, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0005319050991501416, |
|
"loss": 1.9794, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.000531798866855524, |
|
"loss": 1.9203, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005316926345609065, |
|
"loss": 1.928, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005315864022662889, |
|
"loss": 1.9136, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005314801699716713, |
|
"loss": 1.9003, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005313739376770538, |
|
"loss": 1.8607, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005312677053824363, |
|
"loss": 1.9404, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005311614730878186, |
|
"loss": 1.8756, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005310552407932011, |
|
"loss": 1.9147, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005309490084985835, |
|
"loss": 1.9053, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005308427762039659, |
|
"loss": 1.8758, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005307365439093484, |
|
"loss": 1.9473, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005306303116147308, |
|
"loss": 1.8892, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005305240793201133, |
|
"loss": 1.854, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0005304178470254957, |
|
"loss": 1.9216, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005303116147308781, |
|
"loss": 1.9903, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005302053824362606, |
|
"loss": 1.8707, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005300991501416431, |
|
"loss": 1.9148, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005299929178470255, |
|
"loss": 1.9627, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.000529886685552408, |
|
"loss": 1.9643, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005297804532577903, |
|
"loss": 1.9943, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005296742209631727, |
|
"loss": 1.9124, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005295679886685552, |
|
"loss": 1.9824, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005294617563739376, |
|
"loss": 1.8462, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00052935552407932, |
|
"loss": 1.979, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005292492917847025, |
|
"loss": 1.9432, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005291430594900849, |
|
"loss": 1.8095, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005290368271954674, |
|
"loss": 1.892, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005289305949008499, |
|
"loss": 1.9231, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0005288243626062323, |
|
"loss": 1.995, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005287181303116147, |
|
"loss": 1.9238, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005286118980169971, |
|
"loss": 1.8531, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005285056657223795, |
|
"loss": 1.9406, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.000528399433427762, |
|
"loss": 1.8503, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005282932011331444, |
|
"loss": 1.8353, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005281869688385268, |
|
"loss": 1.8922, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005280807365439093, |
|
"loss": 1.9666, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005279745042492917, |
|
"loss": 2.0114, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005278682719546742, |
|
"loss": 1.9562, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005277620396600567, |
|
"loss": 1.9049, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005276558073654391, |
|
"loss": 1.9307, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005275495750708215, |
|
"loss": 2.001, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.000527443342776204, |
|
"loss": 1.9832, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0005273371104815863, |
|
"loss": 1.928, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005272308781869687, |
|
"loss": 1.959, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005271246458923512, |
|
"loss": 1.9206, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005270184135977336, |
|
"loss": 1.8442, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005269121813031161, |
|
"loss": 1.7504, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005268059490084985, |
|
"loss": 1.8934, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.000526699716713881, |
|
"loss": 1.8794, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005265934844192634, |
|
"loss": 1.8589, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005264872521246459, |
|
"loss": 1.8441, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005263810198300283, |
|
"loss": 1.8841, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005262747875354108, |
|
"loss": 1.929, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005261685552407932, |
|
"loss": 1.9099, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005260623229461755, |
|
"loss": 1.8891, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.000525956090651558, |
|
"loss": 1.9476, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005258498583569404, |
|
"loss": 1.8918, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0005257436260623229, |
|
"loss": 1.9186, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005256373937677053, |
|
"loss": 1.8635, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005255311614730878, |
|
"loss": 2.0291, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005254249291784702, |
|
"loss": 1.9284, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005253186968838527, |
|
"loss": 1.8926, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005252124645892351, |
|
"loss": 1.866, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005251062322946175, |
|
"loss": 1.9719, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.000525, |
|
"loss": 1.8531, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005248937677053824, |
|
"loss": 1.9524, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005247875354107649, |
|
"loss": 1.8973, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005246813031161472, |
|
"loss": 1.8233, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005245750708215296, |
|
"loss": 1.9966, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005244688385269121, |
|
"loss": 1.9708, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005243626062322946, |
|
"loss": 1.8901, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.000524256373937677, |
|
"loss": 2.0427, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0005241501416430595, |
|
"loss": 1.7927, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005240439093484419, |
|
"loss": 1.9332, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005239376770538243, |
|
"loss": 1.9916, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005238314447592068, |
|
"loss": 1.9279, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005237252124645892, |
|
"loss": 1.8911, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005236189801699717, |
|
"loss": 1.8922, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.000523512747875354, |
|
"loss": 2.0017, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005234065155807364, |
|
"loss": 1.9189, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005233002832861189, |
|
"loss": 1.9797, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005231940509915014, |
|
"loss": 1.9341, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005230878186968838, |
|
"loss": 1.8676, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005229815864022662, |
|
"loss": 2.0161, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005228753541076487, |
|
"loss": 1.9621, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005227691218130311, |
|
"loss": 1.9889, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0005226628895184136, |
|
"loss": 2.0333, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.000522556657223796, |
|
"loss": 2.0584, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005224504249291784, |
|
"loss": 1.8608, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005223441926345609, |
|
"loss": 1.8649, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005222379603399432, |
|
"loss": 1.9765, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005221317280453257, |
|
"loss": 1.9244, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005220254957507082, |
|
"loss": 1.9518, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005219192634560906, |
|
"loss": 1.8535, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.000521813031161473, |
|
"loss": 1.8148, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005217067988668555, |
|
"loss": 1.881, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005216005665722379, |
|
"loss": 1.9989, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005214943342776204, |
|
"loss": 1.9885, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005213881019830028, |
|
"loss": 1.9382, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005212818696883852, |
|
"loss": 1.897, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005211756373937677, |
|
"loss": 1.9608, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0005210694050991501, |
|
"loss": 2.0054, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005209631728045324, |
|
"loss": 1.8902, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005208569405099149, |
|
"loss": 1.887, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005207507082152974, |
|
"loss": 1.8734, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005206444759206798, |
|
"loss": 1.8166, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005205382436260623, |
|
"loss": 1.9099, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005204320113314447, |
|
"loss": 2.0139, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005203257790368271, |
|
"loss": 1.8796, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005202195467422096, |
|
"loss": 1.9342, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.000520113314447592, |
|
"loss": 1.9592, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005200070821529745, |
|
"loss": 1.9208, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005199008498583569, |
|
"loss": 1.9498, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005197946175637393, |
|
"loss": 1.9403, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005196883852691218, |
|
"loss": 1.8585, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005195821529745043, |
|
"loss": 1.8344, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005194759206798866, |
|
"loss": 1.8947, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005193696883852691, |
|
"loss": 2.0165, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005192634560906515, |
|
"loss": 1.9441, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005191572237960339, |
|
"loss": 1.9187, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005190509915014164, |
|
"loss": 1.8554, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005189447592067988, |
|
"loss": 1.9357, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005188385269121813, |
|
"loss": 1.886, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005187322946175637, |
|
"loss": 1.9487, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005186260623229461, |
|
"loss": 2.0203, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005185198300283286, |
|
"loss": 1.9021, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005184135977337111, |
|
"loss": 1.8394, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005183073654390935, |
|
"loss": 1.9888, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005182011331444758, |
|
"loss": 1.9302, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005180949008498583, |
|
"loss": 1.9058, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005179886685552407, |
|
"loss": 1.9338, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005178824362606232, |
|
"loss": 1.8802, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005177762039660056, |
|
"loss": 1.9089, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.000517669971671388, |
|
"loss": 1.846, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005175637393767705, |
|
"loss": 1.9375, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005174575070821529, |
|
"loss": 1.9994, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005173512747875354, |
|
"loss": 1.9906, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005172450424929179, |
|
"loss": 1.8686, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005171388101983003, |
|
"loss": 2.0107, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005170325779036827, |
|
"loss": 1.9346, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005169263456090651, |
|
"loss": 1.8315, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005168201133144475, |
|
"loss": 1.9107, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00051671388101983, |
|
"loss": 1.8907, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005166076487252124, |
|
"loss": 1.8357, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005165014164305948, |
|
"loss": 1.914, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005163951841359773, |
|
"loss": 1.8738, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005162889518413597, |
|
"loss": 1.925, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005161827195467422, |
|
"loss": 1.9579, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005160764872521246, |
|
"loss": 1.8535, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005159702549575071, |
|
"loss": 1.8589, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005158640226628895, |
|
"loss": 1.8919, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000515757790368272, |
|
"loss": 1.9084, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005156515580736543, |
|
"loss": 1.9123, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005155453257790367, |
|
"loss": 1.9736, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005154390934844192, |
|
"loss": 1.852, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005153328611898016, |
|
"loss": 1.8952, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005152266288951841, |
|
"loss": 1.9945, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005151203966005665, |
|
"loss": 1.9205, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000515014164305949, |
|
"loss": 1.8991, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005149079320113314, |
|
"loss": 1.9652, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005148016997167139, |
|
"loss": 1.8882, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005146954674220963, |
|
"loss": 1.8064, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005145892351274788, |
|
"loss": 1.9101, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005144830028328612, |
|
"loss": 1.9095, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005143767705382435, |
|
"loss": 1.7793, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.000514270538243626, |
|
"loss": 1.8314, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005141643059490084, |
|
"loss": 1.8174, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005140580736543908, |
|
"loss": 1.8951, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005139518413597733, |
|
"loss": 1.947, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005138456090651558, |
|
"loss": 1.9609, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005137393767705382, |
|
"loss": 1.9234, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005136331444759207, |
|
"loss": 1.9117, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005135269121813031, |
|
"loss": 1.9156, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005134206798866855, |
|
"loss": 1.9459, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.000513314447592068, |
|
"loss": 1.948, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005132082152974504, |
|
"loss": 1.8795, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005131019830028329, |
|
"loss": 2.0597, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005129957507082152, |
|
"loss": 1.9661, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005128895184135976, |
|
"loss": 1.9242, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005127832861189801, |
|
"loss": 1.9773, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005126770538243626, |
|
"loss": 2.0018, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000512570821529745, |
|
"loss": 1.8503, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005124645892351275, |
|
"loss": 1.8622, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005123583569405099, |
|
"loss": 2.0041, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005122521246458923, |
|
"loss": 1.9051, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005121458923512748, |
|
"loss": 1.9256, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005120396600566572, |
|
"loss": 1.8081, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005119334277620396, |
|
"loss": 1.9711, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000511827195467422, |
|
"loss": 1.8529, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005117209631728044, |
|
"loss": 1.9109, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005116147308781869, |
|
"loss": 1.8838, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005115084985835694, |
|
"loss": 1.9032, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005114022662889518, |
|
"loss": 1.9147, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005112960339943342, |
|
"loss": 1.9068, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005111898016997167, |
|
"loss": 1.9089, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005110835694050991, |
|
"loss": 1.9809, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005109773371104816, |
|
"loss": 1.9325, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.000510871104815864, |
|
"loss": 1.9286, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005107648725212464, |
|
"loss": 1.89, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005106586402266289, |
|
"loss": 1.8083, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005105524079320113, |
|
"loss": 1.9368, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005104461756373937, |
|
"loss": 1.8549, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005103399433427762, |
|
"loss": 1.9071, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005102337110481586, |
|
"loss": 1.8773, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.000510127478753541, |
|
"loss": 1.8871, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005100212464589235, |
|
"loss": 1.9191, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005099150141643059, |
|
"loss": 1.9498, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005098087818696883, |
|
"loss": 1.9836, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005097025495750708, |
|
"loss": 1.8851, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005095963172804532, |
|
"loss": 1.9971, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005094900849858357, |
|
"loss": 1.9359, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005093838526912181, |
|
"loss": 1.8921, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005092776203966004, |
|
"loss": 1.815, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005091713881019829, |
|
"loss": 1.8988, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005090651558073654, |
|
"loss": 1.8765, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005089589235127478, |
|
"loss": 1.9362, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005088526912181303, |
|
"loss": 1.9116, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005087464589235127, |
|
"loss": 1.8551, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0005086402266288951, |
|
"loss": 1.9469, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005085339943342776, |
|
"loss": 1.8255, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00050842776203966, |
|
"loss": 1.8748, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005083215297450425, |
|
"loss": 1.9767, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005082152974504249, |
|
"loss": 1.8532, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005081090651558073, |
|
"loss": 1.9841, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005080028328611898, |
|
"loss": 1.9048, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005078966005665723, |
|
"loss": 1.9636, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005077903682719546, |
|
"loss": 1.8621, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.000507684135977337, |
|
"loss": 1.8718, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005075779036827195, |
|
"loss": 2.0384, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005074716713881019, |
|
"loss": 1.9328, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005073654390934844, |
|
"loss": 1.8559, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005072592067988668, |
|
"loss": 1.8485, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005071529745042492, |
|
"loss": 1.9306, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0005070467422096317, |
|
"loss": 1.9929, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005069405099150141, |
|
"loss": 1.9442, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005068342776203966, |
|
"loss": 1.869, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005067280453257791, |
|
"loss": 1.9625, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005066218130311615, |
|
"loss": 1.9757, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005065155807365438, |
|
"loss": 1.9721, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005064093484419263, |
|
"loss": 1.9313, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005063031161473087, |
|
"loss": 1.9032, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005061968838526912, |
|
"loss": 1.8946, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005060906515580736, |
|
"loss": 1.927, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.000505984419263456, |
|
"loss": 1.9355, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005058781869688385, |
|
"loss": 1.9144, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005057719546742209, |
|
"loss": 1.8711, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005056657223796033, |
|
"loss": 1.908, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0005055594900849858, |
|
"loss": 1.8863, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005054532577903683, |
|
"loss": 1.8877, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005053470254957507, |
|
"loss": 1.9125, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005052407932011331, |
|
"loss": 1.9083, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005051345609065155, |
|
"loss": 1.9148, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005050283286118979, |
|
"loss": 1.9313, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005049220963172804, |
|
"loss": 1.9542, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005048158640226628, |
|
"loss": 1.8714, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005047096317280453, |
|
"loss": 1.9771, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005046033994334277, |
|
"loss": 1.8602, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005044971671388101, |
|
"loss": 1.8857, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005043909348441926, |
|
"loss": 1.8951, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005042847025495751, |
|
"loss": 1.9468, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005041784702549575, |
|
"loss": 1.8194, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00050407223796034, |
|
"loss": 1.9466, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0005039660056657223, |
|
"loss": 1.8048, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005038597733711047, |
|
"loss": 1.8586, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005037535410764872, |
|
"loss": 1.8783, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005036473087818696, |
|
"loss": 1.8999, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.000503541076487252, |
|
"loss": 1.9716, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005034348441926345, |
|
"loss": 1.9234, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005033286118980169, |
|
"loss": 1.9661, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005032223796033994, |
|
"loss": 1.9817, |
|
"step": 1111 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005031161473087819, |
|
"loss": 1.9502, |
|
"step": 1112 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005030099150141643, |
|
"loss": 1.9734, |
|
"step": 1113 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005029036827195467, |
|
"loss": 1.8585, |
|
"step": 1114 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005027974504249292, |
|
"loss": 1.9161, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005026912181303115, |
|
"loss": 1.9213, |
|
"step": 1116 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.000502584985835694, |
|
"loss": 1.9839, |
|
"step": 1117 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0005024787535410764, |
|
"loss": 1.9189, |
|
"step": 1118 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005023725212464588, |
|
"loss": 2.0178, |
|
"step": 1119 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005022662889518413, |
|
"loss": 1.9683, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005021600566572237, |
|
"loss": 1.9482, |
|
"step": 1121 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005020538243626062, |
|
"loss": 1.9666, |
|
"step": 1122 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005019475920679887, |
|
"loss": 1.9685, |
|
"step": 1123 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005018413597733711, |
|
"loss": 1.8652, |
|
"step": 1124 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005017351274787535, |
|
"loss": 1.9892, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.000501628895184136, |
|
"loss": 1.8648, |
|
"step": 1126 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005015226628895184, |
|
"loss": 1.9587, |
|
"step": 1127 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005014164305949007, |
|
"loss": 1.9027, |
|
"step": 1128 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005013101983002832, |
|
"loss": 1.8348, |
|
"step": 1129 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005012039660056656, |
|
"loss": 1.9392, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005010977337110481, |
|
"loss": 1.9353, |
|
"step": 1131 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0005009915014164305, |
|
"loss": 1.8492, |
|
"step": 1132 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.000500885269121813, |
|
"loss": 1.9305, |
|
"step": 1133 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005007790368271954, |
|
"loss": 1.824, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005006728045325779, |
|
"loss": 1.8344, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005005665722379603, |
|
"loss": 1.9966, |
|
"step": 1136 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005004603399433428, |
|
"loss": 1.9755, |
|
"step": 1137 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005003541076487252, |
|
"loss": 1.9514, |
|
"step": 1138 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005002478753541076, |
|
"loss": 1.876, |
|
"step": 1139 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00050014164305949, |
|
"loss": 1.8808, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0005000354107648724, |
|
"loss": 1.8629, |
|
"step": 1141 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004999291784702549, |
|
"loss": 1.8372, |
|
"step": 1142 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004998229461756373, |
|
"loss": 1.9778, |
|
"step": 1143 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004997167138810198, |
|
"loss": 1.9397, |
|
"step": 1144 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004996104815864022, |
|
"loss": 1.9774, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004995042492917847, |
|
"loss": 1.8552, |
|
"step": 1146 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004993980169971671, |
|
"loss": 1.8838, |
|
"step": 1147 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0004992917847025496, |
|
"loss": 1.981, |
|
"step": 1148 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.000499185552407932, |
|
"loss": 1.987, |
|
"step": 1149 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004990793201133144, |
|
"loss": 1.9077, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004989730878186969, |
|
"loss": 1.9345, |
|
"step": 1151 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004988668555240793, |
|
"loss": 1.8886, |
|
"step": 1152 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004987606232294616, |
|
"loss": 1.8028, |
|
"step": 1153 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004986543909348441, |
|
"loss": 1.8515, |
|
"step": 1154 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004985481586402266, |
|
"loss": 1.9281, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.000498441926345609, |
|
"loss": 1.9567, |
|
"step": 1156 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004983356940509915, |
|
"loss": 1.8555, |
|
"step": 1157 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004982294617563739, |
|
"loss": 1.9253, |
|
"step": 1158 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004981232294617563, |
|
"loss": 1.9719, |
|
"step": 1159 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004980169971671388, |
|
"loss": 1.934, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004979107648725212, |
|
"loss": 1.8554, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0004978045325779037, |
|
"loss": 1.9549, |
|
"step": 1162 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004976983002832861, |
|
"loss": 1.8785, |
|
"step": 1163 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004975920679886684, |
|
"loss": 1.902, |
|
"step": 1164 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004974858356940509, |
|
"loss": 1.8741, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004973796033994334, |
|
"loss": 1.9525, |
|
"step": 1166 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004972733711048158, |
|
"loss": 1.8687, |
|
"step": 1167 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004971671388101983, |
|
"loss": 1.9317, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004970609065155807, |
|
"loss": 1.937, |
|
"step": 1169 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004969546742209631, |
|
"loss": 1.917, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004968484419263456, |
|
"loss": 1.9639, |
|
"step": 1171 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.000496742209631728, |
|
"loss": 1.908, |
|
"step": 1172 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004966359773371104, |
|
"loss": 1.8845, |
|
"step": 1173 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004965297450424929, |
|
"loss": 1.9162, |
|
"step": 1174 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004964235127478753, |
|
"loss": 1.921, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004963172804532578, |
|
"loss": 1.8296, |
|
"step": 1176 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0004962110481586403, |
|
"loss": 1.8656, |
|
"step": 1177 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004961048158640226, |
|
"loss": 1.9953, |
|
"step": 1178 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.000495998583569405, |
|
"loss": 1.8856, |
|
"step": 1179 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004958923512747875, |
|
"loss": 1.9518, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004957861189801699, |
|
"loss": 1.9682, |
|
"step": 1181 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004956798866855524, |
|
"loss": 1.9359, |
|
"step": 1182 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004955736543909348, |
|
"loss": 1.7835, |
|
"step": 1183 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004954674220963172, |
|
"loss": 2.0103, |
|
"step": 1184 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004953611898016997, |
|
"loss": 2.0312, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004952549575070821, |
|
"loss": 1.9787, |
|
"step": 1186 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004951487252124646, |
|
"loss": 1.9269, |
|
"step": 1187 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004950424929178471, |
|
"loss": 1.8983, |
|
"step": 1188 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004949362606232295, |
|
"loss": 1.9233, |
|
"step": 1189 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004948300283286118, |
|
"loss": 1.9143, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004947237960339943, |
|
"loss": 1.9361, |
|
"step": 1191 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0004946175637393767, |
|
"loss": 2.0513, |
|
"step": 1192 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004945113314447591, |
|
"loss": 1.8629, |
|
"step": 1193 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004944050991501416, |
|
"loss": 1.9171, |
|
"step": 1194 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.000494298866855524, |
|
"loss": 1.9711, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004941926345609065, |
|
"loss": 1.8462, |
|
"step": 1196 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004940864022662889, |
|
"loss": 1.9059, |
|
"step": 1197 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004939801699716713, |
|
"loss": 1.9364, |
|
"step": 1198 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004938739376770538, |
|
"loss": 1.9621, |
|
"step": 1199 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0004937677053824363, |
|
"loss": 1.9355, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"eval_loss": 1.987623929977417, |
|
"eval_runtime": 1467.5466, |
|
"eval_samples_per_second": 9.909, |
|
"eval_steps_per_second": 9.909, |
|
"step": 1200 |
|
} |
|
], |
|
"max_steps": 5848, |
|
"num_train_epochs": 4, |
|
"total_flos": 1.469431232616066e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|