|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9995796553173603, |
|
"global_step": 1189, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0, |
|
"loss": 4.8511, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0, |
|
"loss": 5.0392, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2e-06, |
|
"loss": 5.0449, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4e-06, |
|
"loss": 4.8611, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 6e-06, |
|
"loss": 4.5775, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8e-06, |
|
"loss": 4.895, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1e-05, |
|
"loss": 4.619, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.2e-05, |
|
"loss": 4.5561, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.2e-05, |
|
"loss": 4.7099, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.4e-05, |
|
"loss": 4.5353, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.6e-05, |
|
"loss": 4.5746, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.8e-05, |
|
"loss": 4.4166, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2e-05, |
|
"loss": 4.6183, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.2e-05, |
|
"loss": 4.3869, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.4e-05, |
|
"loss": 4.2778, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.6e-05, |
|
"loss": 4.4726, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.8e-05, |
|
"loss": 4.4224, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3e-05, |
|
"loss": 4.5754, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.2e-05, |
|
"loss": 4.2828, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.4000000000000007e-05, |
|
"loss": 4.4854, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.6e-05, |
|
"loss": 4.4592, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.8e-05, |
|
"loss": 4.336, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4e-05, |
|
"loss": 4.4397, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.2000000000000004e-05, |
|
"loss": 4.4482, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.4e-05, |
|
"loss": 4.2657, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.6e-05, |
|
"loss": 4.4489, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.8e-05, |
|
"loss": 4.3049, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 5e-05, |
|
"loss": 4.1245, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 5.2e-05, |
|
"loss": 4.1784, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.4e-05, |
|
"loss": 4.2998, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.6e-05, |
|
"loss": 4.4284, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.800000000000001e-05, |
|
"loss": 4.2353, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 6e-05, |
|
"loss": 4.3895, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 6.2e-05, |
|
"loss": 4.0501, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 6.4e-05, |
|
"loss": 4.2444, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 6.6e-05, |
|
"loss": 4.5839, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 6.800000000000001e-05, |
|
"loss": 4.3782, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 7.000000000000001e-05, |
|
"loss": 4.2556, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 7.2e-05, |
|
"loss": 4.7111, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 7.4e-05, |
|
"loss": 4.4187, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 7.6e-05, |
|
"loss": 4.1914, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 7.8e-05, |
|
"loss": 4.2891, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8e-05, |
|
"loss": 4.2439, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.2e-05, |
|
"loss": 4.2867, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.400000000000001e-05, |
|
"loss": 4.4894, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.599999999999999e-05, |
|
"loss": 4.4172, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.8e-05, |
|
"loss": 4.6963, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 4.7471, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 9.2e-05, |
|
"loss": 4.9014, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 9.400000000000001e-05, |
|
"loss": 4.7686, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 9.6e-05, |
|
"loss": 4.368, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 9.800000000000001e-05, |
|
"loss": 4.3822, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001, |
|
"loss": 4.2264, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000102, |
|
"loss": 4.2716, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000104, |
|
"loss": 4.4112, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000106, |
|
"loss": 4.191, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000108, |
|
"loss": 4.4509, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00011, |
|
"loss": 4.4127, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000112, |
|
"loss": 4.2537, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000114, |
|
"loss": 4.4896, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00011600000000000001, |
|
"loss": 4.2308, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000118, |
|
"loss": 4.4071, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00012, |
|
"loss": 4.2868, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000122, |
|
"loss": 4.1752, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000124, |
|
"loss": 4.1496, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000126, |
|
"loss": 4.2547, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000128, |
|
"loss": 4.3735, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00013000000000000002, |
|
"loss": 4.243, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000132, |
|
"loss": 4.2763, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000134, |
|
"loss": 4.3281, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00013600000000000003, |
|
"loss": 4.5117, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00013800000000000002, |
|
"loss": 4.4156, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00014000000000000001, |
|
"loss": 4.2873, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00014199999999999998, |
|
"loss": 4.4208, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000144, |
|
"loss": 4.2238, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000146, |
|
"loss": 4.5864, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000148, |
|
"loss": 4.4607, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015, |
|
"loss": 4.2875, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000152, |
|
"loss": 4.2922, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000154, |
|
"loss": 4.4421, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000156, |
|
"loss": 4.433, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000158, |
|
"loss": 4.3725, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00016, |
|
"loss": 4.3333, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000162, |
|
"loss": 4.4473, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000164, |
|
"loss": 4.4238, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00016600000000000002, |
|
"loss": 4.3154, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00016800000000000002, |
|
"loss": 4.2695, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00017, |
|
"loss": 4.4924, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00017199999999999998, |
|
"loss": 4.5737, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000174, |
|
"loss": 4.457, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000176, |
|
"loss": 4.5949, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000178, |
|
"loss": 4.5588, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 4.1973, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000182, |
|
"loss": 4.5561, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000184, |
|
"loss": 4.7577, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000186, |
|
"loss": 4.6404, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00018800000000000002, |
|
"loss": 4.6471, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019, |
|
"loss": 4.7406, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000192, |
|
"loss": 4.6403, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000194, |
|
"loss": 4.444, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019600000000000002, |
|
"loss": 4.3784, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019800000000000002, |
|
"loss": 4.6657, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0002, |
|
"loss": 4.4524, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000202, |
|
"loss": 4.6985, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000204, |
|
"loss": 4.6463, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000206, |
|
"loss": 4.5927, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000208, |
|
"loss": 4.6364, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00021, |
|
"loss": 4.6561, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000212, |
|
"loss": 4.8036, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000214, |
|
"loss": 4.4123, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000216, |
|
"loss": 4.6414, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000218, |
|
"loss": 4.7836, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00022, |
|
"loss": 4.412, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000222, |
|
"loss": 4.6303, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000224, |
|
"loss": 4.5641, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00022600000000000002, |
|
"loss": 4.7007, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000228, |
|
"loss": 4.4402, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00023, |
|
"loss": 4.2599, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00023200000000000003, |
|
"loss": 4.3835, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00023400000000000002, |
|
"loss": 4.3983, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000236, |
|
"loss": 4.8916, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00023799999999999998, |
|
"loss": 4.5147, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00024, |
|
"loss": 4.555, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000242, |
|
"loss": 4.6935, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000244, |
|
"loss": 4.3677, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000246, |
|
"loss": 4.515, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000248, |
|
"loss": 4.6372, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00025, |
|
"loss": 4.4019, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000252, |
|
"loss": 4.3392, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000254, |
|
"loss": 4.5796, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000256, |
|
"loss": 4.5943, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00025800000000000004, |
|
"loss": 4.4655, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00026000000000000003, |
|
"loss": 4.8017, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000262, |
|
"loss": 4.5575, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000264, |
|
"loss": 4.6614, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000266, |
|
"loss": 4.4765, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000268, |
|
"loss": 4.5643, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00027, |
|
"loss": 4.666, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00027200000000000005, |
|
"loss": 4.6639, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00027400000000000005, |
|
"loss": 4.6461, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00027600000000000004, |
|
"loss": 4.8226, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00027800000000000004, |
|
"loss": 4.5566, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00028000000000000003, |
|
"loss": 4.4941, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00028199999999999997, |
|
"loss": 4.8677, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00028399999999999996, |
|
"loss": 5.048, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00028599999999999996, |
|
"loss": 4.8524, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000288, |
|
"loss": 4.9183, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00029, |
|
"loss": 4.4394, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000292, |
|
"loss": 5.5298, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000294, |
|
"loss": 4.7055, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000296, |
|
"loss": 4.8902, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000298, |
|
"loss": 4.8158, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0003, |
|
"loss": 4.7027, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000302, |
|
"loss": 4.7774, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000304, |
|
"loss": 4.7513, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000306, |
|
"loss": 4.5636, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000308, |
|
"loss": 4.6546, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00031, |
|
"loss": 5.6404, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000312, |
|
"loss": 4.6338, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000314, |
|
"loss": 4.6164, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.000316, |
|
"loss": 7.2017, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00031800000000000003, |
|
"loss": 5.0695, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00032, |
|
"loss": 5.8149, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.000322, |
|
"loss": 5.784, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.000324, |
|
"loss": 5.1999, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.000326, |
|
"loss": 5.1967, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.000328, |
|
"loss": 6.9382, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00033, |
|
"loss": 5.4285, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00033200000000000005, |
|
"loss": 5.1949, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00033400000000000004, |
|
"loss": 4.737, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00033600000000000004, |
|
"loss": 4.7707, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00033800000000000003, |
|
"loss": 4.817, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00034, |
|
"loss": 4.7532, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000342, |
|
"loss": 5.9552, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00034399999999999996, |
|
"loss": 4.8476, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000346, |
|
"loss": 5.7053, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000348, |
|
"loss": 4.8703, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00035, |
|
"loss": 4.7289, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000352, |
|
"loss": 4.628, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000354, |
|
"loss": 4.8781, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000356, |
|
"loss": 4.8125, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000358, |
|
"loss": 4.9407, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00035999999999999997, |
|
"loss": 4.7761, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000362, |
|
"loss": 5.049, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000364, |
|
"loss": 4.4187, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000366, |
|
"loss": 4.9644, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000368, |
|
"loss": 4.8425, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00037, |
|
"loss": 4.6558, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000372, |
|
"loss": 4.973, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000374, |
|
"loss": 5.0381, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00037600000000000003, |
|
"loss": 5.2445, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000378, |
|
"loss": 5.4255, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00038, |
|
"loss": 4.9689, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000382, |
|
"loss": 5.2518, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000384, |
|
"loss": 4.9659, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000386, |
|
"loss": 5.2984, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000388, |
|
"loss": 4.9212, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00039000000000000005, |
|
"loss": 5.186, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00039200000000000004, |
|
"loss": 5.0575, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00039400000000000004, |
|
"loss": 4.6557, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00039600000000000003, |
|
"loss": 5.1663, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000398, |
|
"loss": 5.0017, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0004, |
|
"loss": 5.2516, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000402, |
|
"loss": 4.8405, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000404, |
|
"loss": 5.2231, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00040600000000000006, |
|
"loss": 5.0627, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000408, |
|
"loss": 4.7863, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00041, |
|
"loss": 5.9023, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000412, |
|
"loss": 4.9873, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000414, |
|
"loss": 5.1966, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000416, |
|
"loss": 4.8328, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00041799999999999997, |
|
"loss": 5.0304, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00042, |
|
"loss": 4.7429, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000422, |
|
"loss": 4.9829, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000424, |
|
"loss": 4.8241, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000426, |
|
"loss": 4.8027, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000428, |
|
"loss": 5.1121, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00043, |
|
"loss": 4.8366, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000432, |
|
"loss": 4.623, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00043400000000000003, |
|
"loss": 5.8336, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000436, |
|
"loss": 4.9087, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000438, |
|
"loss": 5.4127, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00044, |
|
"loss": 5.0507, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000442, |
|
"loss": 5.0366, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000444, |
|
"loss": 4.8739, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000446, |
|
"loss": 4.6306, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000448, |
|
"loss": 4.9492, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00045000000000000004, |
|
"loss": 5.2392, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00045200000000000004, |
|
"loss": 5.1064, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00045400000000000003, |
|
"loss": 4.8969, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000456, |
|
"loss": 4.9746, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000458, |
|
"loss": 4.7355, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00046, |
|
"loss": 5.2297, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000462, |
|
"loss": 5.0465, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00046400000000000006, |
|
"loss": 5.0885, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00046600000000000005, |
|
"loss": 5.3038, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00046800000000000005, |
|
"loss": 4.8304, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00047, |
|
"loss": 4.8464, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000472, |
|
"loss": 4.8042, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000474, |
|
"loss": 4.8883, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00047599999999999997, |
|
"loss": 5.4181, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00047799999999999996, |
|
"loss": 5.1992, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00048, |
|
"loss": 5.1573, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000482, |
|
"loss": 4.9052, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000484, |
|
"loss": 5.9046, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000486, |
|
"loss": 4.7418, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000488, |
|
"loss": 6.3338, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00049, |
|
"loss": 5.2248, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000492, |
|
"loss": 8.206, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000494, |
|
"loss": 5.2911, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000496, |
|
"loss": 5.3111, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000498, |
|
"loss": 4.8883, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0005, |
|
"loss": 5.0506, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0005020000000000001, |
|
"loss": 4.7748, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000504, |
|
"loss": 4.9331, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000506, |
|
"loss": 5.2438, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000508, |
|
"loss": 4.6562, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00051, |
|
"loss": 5.5853, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000512, |
|
"loss": 5.2, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000514, |
|
"loss": 4.8476, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0005160000000000001, |
|
"loss": 4.9591, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000518, |
|
"loss": 5.2288, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0005200000000000001, |
|
"loss": 5.0392, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000522, |
|
"loss": 5.1153, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000524, |
|
"loss": 5.0144, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000526, |
|
"loss": 5.4205, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000528, |
|
"loss": 4.859, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005300000000000001, |
|
"loss": 5.0684, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000532, |
|
"loss": 5.0688, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005340000000000001, |
|
"loss": 4.842, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000536, |
|
"loss": 5.0882, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005380000000000001, |
|
"loss": 4.8919, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00054, |
|
"loss": 4.8645, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005420000000000001, |
|
"loss": 4.9041, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005440000000000001, |
|
"loss": 4.9276, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000546, |
|
"loss": 4.7217, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005480000000000001, |
|
"loss": 5.7271, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00055, |
|
"loss": 5.5031, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0005520000000000001, |
|
"loss": 5.4163, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000554, |
|
"loss": 5.0355, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005560000000000001, |
|
"loss": 4.9209, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000558, |
|
"loss": 5.0386, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005600000000000001, |
|
"loss": 6.3469, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005620000000000001, |
|
"loss": 5.1925, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005639999999999999, |
|
"loss": 5.1816, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000566, |
|
"loss": 4.9083, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005679999999999999, |
|
"loss": 5.0828, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00057, |
|
"loss": 4.7952, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0005719999999999999, |
|
"loss": 5.0554, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000574, |
|
"loss": 5.0934, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000576, |
|
"loss": 5.2574, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000578, |
|
"loss": 6.2128, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00058, |
|
"loss": 4.7037, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0005819999999999999, |
|
"loss": 5.0873, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000584, |
|
"loss": 5.2764, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0005859999999999999, |
|
"loss": 6.2514, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000588, |
|
"loss": 5.6582, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00059, |
|
"loss": 5.2889, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000592, |
|
"loss": 8.0798, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000594, |
|
"loss": 7.3563, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000596, |
|
"loss": 5.2756, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000598, |
|
"loss": 5.2133, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0006, |
|
"loss": 5.4376, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000602, |
|
"loss": 4.9474, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000604, |
|
"loss": 4.9552, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000606, |
|
"loss": 5.4167, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000608, |
|
"loss": 5.1688, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00061, |
|
"loss": 5.1933, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000612, |
|
"loss": 4.7817, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000614, |
|
"loss": 5.4313, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000616, |
|
"loss": 4.9793, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0006180000000000001, |
|
"loss": 4.8846, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00062, |
|
"loss": 4.8638, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000622, |
|
"loss": 5.2058, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000624, |
|
"loss": 4.7608, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000626, |
|
"loss": 5.1831, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000628, |
|
"loss": 5.3887, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00063, |
|
"loss": 5.0039, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000632, |
|
"loss": 4.9832, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000634, |
|
"loss": 5.0153, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0006360000000000001, |
|
"loss": 5.1506, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000638, |
|
"loss": 4.9482, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00064, |
|
"loss": 4.9163, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000642, |
|
"loss": 5.4786, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000644, |
|
"loss": 4.8748, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000646, |
|
"loss": 6.1923, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000648, |
|
"loss": 5.4042, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006500000000000001, |
|
"loss": 5.3992, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000652, |
|
"loss": 4.9699, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006540000000000001, |
|
"loss": 5.8082, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000656, |
|
"loss": 5.466, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006580000000000001, |
|
"loss": 6.293, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00066, |
|
"loss": 5.1304, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000662, |
|
"loss": 5.2604, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006640000000000001, |
|
"loss": 5.2558, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000666, |
|
"loss": 5.6875, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006680000000000001, |
|
"loss": 4.9292, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00067, |
|
"loss": 5.2356, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0006720000000000001, |
|
"loss": 5.2205, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000674, |
|
"loss": 5.3556, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0006760000000000001, |
|
"loss": 5.2928, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0006780000000000001, |
|
"loss": 5.553, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00068, |
|
"loss": 6.2782, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0006820000000000001, |
|
"loss": 5.6736, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000684, |
|
"loss": 4.9139, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0006860000000000001, |
|
"loss": 5.4002, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0006879999999999999, |
|
"loss": 5.517, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00069, |
|
"loss": 6.5166, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000692, |
|
"loss": 5.9373, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000694, |
|
"loss": 5.1641, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000696, |
|
"loss": 5.9066, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0006979999999999999, |
|
"loss": 5.2212, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007, |
|
"loss": 6.0158, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007019999999999999, |
|
"loss": 5.3464, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000704, |
|
"loss": 5.1174, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007059999999999999, |
|
"loss": 5.2882, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000708, |
|
"loss": 5.352, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00071, |
|
"loss": 4.9085, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000712, |
|
"loss": 5.2592, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000714, |
|
"loss": 5.2981, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000716, |
|
"loss": 4.9637, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000718, |
|
"loss": 5.5029, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0007199999999999999, |
|
"loss": 5.0046, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000722, |
|
"loss": 5.0682, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000724, |
|
"loss": 5.8746, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000726, |
|
"loss": 5.8206, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000728, |
|
"loss": 5.1082, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00073, |
|
"loss": 5.3463, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000732, |
|
"loss": 5.7593, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000734, |
|
"loss": 5.5599, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000736, |
|
"loss": 5.4685, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000738, |
|
"loss": 5.5344, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00074, |
|
"loss": 5.1595, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000742, |
|
"loss": 5.197, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000744, |
|
"loss": 5.1699, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000746, |
|
"loss": 5.4809, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000748, |
|
"loss": 5.297, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00075, |
|
"loss": 5.2932, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0007520000000000001, |
|
"loss": 5.2886, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000754, |
|
"loss": 5.3678, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000756, |
|
"loss": 4.9895, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000758, |
|
"loss": 5.2246, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00076, |
|
"loss": 5.0254, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000762, |
|
"loss": 5.3305, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000764, |
|
"loss": 5.3363, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0007660000000000001, |
|
"loss": 5.0416, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000768, |
|
"loss": 5.2708, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0007700000000000001, |
|
"loss": 5.1443, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000772, |
|
"loss": 5.4075, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0007740000000000001, |
|
"loss": 5.3772, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000776, |
|
"loss": 5.3105, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000778, |
|
"loss": 6.8609, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0007800000000000001, |
|
"loss": 5.0175, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000782, |
|
"loss": 5.3111, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0007840000000000001, |
|
"loss": 4.9941, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000786, |
|
"loss": 5.9271, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0007880000000000001, |
|
"loss": 5.6516, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00079, |
|
"loss": 5.606, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0007920000000000001, |
|
"loss": 6.9485, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0007940000000000001, |
|
"loss": 8.2026, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000796, |
|
"loss": 5.7139, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0007980000000000001, |
|
"loss": 5.5075, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008, |
|
"loss": 5.4495, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008020000000000001, |
|
"loss": 5.4492, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000804, |
|
"loss": 5.4174, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008060000000000001, |
|
"loss": 5.1429, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000808, |
|
"loss": 5.228, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008100000000000001, |
|
"loss": 5.9743, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008120000000000001, |
|
"loss": 5.346, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0008139999999999999, |
|
"loss": 5.4215, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000816, |
|
"loss": 5.534, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0008179999999999999, |
|
"loss": 5.3026, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00082, |
|
"loss": 5.4964, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0008219999999999999, |
|
"loss": 5.0144, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000824, |
|
"loss": 5.3684, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000826, |
|
"loss": 5.6391, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000828, |
|
"loss": 4.9192, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00083, |
|
"loss": 5.2417, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000832, |
|
"loss": 5.269, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000834, |
|
"loss": 5.4544, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0008359999999999999, |
|
"loss": 5.6599, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000838, |
|
"loss": 5.1408, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00084, |
|
"loss": 5.0571, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000842, |
|
"loss": 5.0662, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000844, |
|
"loss": 5.7209, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000846, |
|
"loss": 4.9936, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000848, |
|
"loss": 5.0084, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00085, |
|
"loss": 5.3047, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000852, |
|
"loss": 5.954, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000854, |
|
"loss": 5.6528, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000856, |
|
"loss": 5.3661, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000858, |
|
"loss": 5.3309, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00086, |
|
"loss": 5.4359, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000862, |
|
"loss": 5.2456, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000864, |
|
"loss": 5.368, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000866, |
|
"loss": 5.106, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0008680000000000001, |
|
"loss": 5.0062, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00087, |
|
"loss": 5.0479, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000872, |
|
"loss": 5.4351, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000874, |
|
"loss": 5.094, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000876, |
|
"loss": 5.372, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000878, |
|
"loss": 5.2698, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00088, |
|
"loss": 5.1859, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000882, |
|
"loss": 5.2186, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000884, |
|
"loss": 4.9551, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0008860000000000001, |
|
"loss": 5.2439, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000888, |
|
"loss": 8.0543, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0008900000000000001, |
|
"loss": 5.4051, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000892, |
|
"loss": 5.519, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000894, |
|
"loss": 5.5713, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000896, |
|
"loss": 5.3046, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000898, |
|
"loss": 5.1503, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0009000000000000001, |
|
"loss": 5.2802, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000902, |
|
"loss": 5.415, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0009040000000000001, |
|
"loss": 6.009, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000906, |
|
"loss": 5.8562, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0009080000000000001, |
|
"loss": 5.4349, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00091, |
|
"loss": 7.5475, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000912, |
|
"loss": 5.4009, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009140000000000001, |
|
"loss": 5.4328, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000916, |
|
"loss": 5.5625, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009180000000000001, |
|
"loss": 5.3081, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00092, |
|
"loss": 5.2885, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009220000000000001, |
|
"loss": 5.7384, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000924, |
|
"loss": 5.3471, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009260000000000001, |
|
"loss": 5.5417, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009280000000000001, |
|
"loss": 5.2314, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00093, |
|
"loss": 5.2603, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009320000000000001, |
|
"loss": 5.1603, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000934, |
|
"loss": 5.4647, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009360000000000001, |
|
"loss": 5.2128, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009379999999999999, |
|
"loss": 5.3251, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00094, |
|
"loss": 5.2272, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000942, |
|
"loss": 5.37, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000944, |
|
"loss": 5.7458, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000946, |
|
"loss": 6.0803, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000948, |
|
"loss": 5.0569, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00095, |
|
"loss": 5.4589, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009519999999999999, |
|
"loss": 5.1716, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000954, |
|
"loss": 5.4422, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009559999999999999, |
|
"loss": 5.0668, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000958, |
|
"loss": 5.2654, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00096, |
|
"loss": 6.7736, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000962, |
|
"loss": 5.7394, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000964, |
|
"loss": 5.4959, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000966, |
|
"loss": 5.5867, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000968, |
|
"loss": 5.2123, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0009699999999999999, |
|
"loss": 5.4199, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000972, |
|
"loss": 5.8817, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000974, |
|
"loss": 5.1656, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000976, |
|
"loss": 5.7833, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000978, |
|
"loss": 5.7503, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00098, |
|
"loss": 6.3054, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000982, |
|
"loss": 6.0516, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000984, |
|
"loss": 6.7448, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009860000000000001, |
|
"loss": 5.8998, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000988, |
|
"loss": 5.523, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00099, |
|
"loss": 5.4295, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000992, |
|
"loss": 5.2345, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000994, |
|
"loss": 5.6082, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_loss": 5.971189498901367, |
|
"eval_runtime": 772.5606, |
|
"eval_samples_per_second": 3.42, |
|
"eval_steps_per_second": 0.286, |
|
"eval_wer": 1.9655890519635066, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000996, |
|
"loss": 6.0055, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000998, |
|
"loss": 5.5965, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.001, |
|
"loss": 5.4823, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009985486211901307, |
|
"loss": 5.3086, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009970972423802612, |
|
"loss": 5.3724, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.000995645863570392, |
|
"loss": 5.6485, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009941944847605226, |
|
"loss": 5.4129, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009927431059506531, |
|
"loss": 5.3835, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009912917271407838, |
|
"loss": 5.3629, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009898403483309143, |
|
"loss": 5.6084, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.000988388969521045, |
|
"loss": 5.522, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009869375907111755, |
|
"loss": 5.1979, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009854862119013062, |
|
"loss": 5.5267, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.000984034833091437, |
|
"loss": 5.0734, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009825834542815674, |
|
"loss": 5.1902, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009811320754716981, |
|
"loss": 5.2944, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009796806966618288, |
|
"loss": 5.5136, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009782293178519593, |
|
"loss": 5.4618, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00097677793904209, |
|
"loss": 5.7373, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009753265602322206, |
|
"loss": 5.0968, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009738751814223512, |
|
"loss": 5.3119, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009724238026124818, |
|
"loss": 5.2911, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009709724238026125, |
|
"loss": 6.3803, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000969521044992743, |
|
"loss": 5.6489, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009680696661828737, |
|
"loss": 5.6184, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009666182873730044, |
|
"loss": 5.0753, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000965166908563135, |
|
"loss": 5.3418, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009637155297532656, |
|
"loss": 5.7998, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009622641509433962, |
|
"loss": 5.3236, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009608127721335269, |
|
"loss": 5.3762, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009593613933236574, |
|
"loss": 5.4276, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009579100145137881, |
|
"loss": 5.3769, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009564586357039187, |
|
"loss": 5.2204, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009550072568940493, |
|
"loss": 5.3234, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009535558780841799, |
|
"loss": 5.2219, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009521044992743106, |
|
"loss": 5.2035, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009506531204644411, |
|
"loss": 5.6239, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009492017416545718, |
|
"loss": 5.3629, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009477503628447026, |
|
"loss": 5.3972, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009462989840348331, |
|
"loss": 5.709, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009448476052249637, |
|
"loss": 5.5734, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009433962264150943, |
|
"loss": 5.5395, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.000941944847605225, |
|
"loss": 5.1856, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009404934687953555, |
|
"loss": 5.4564, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009390420899854863, |
|
"loss": 5.4666, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009375907111756169, |
|
"loss": 5.2829, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009361393323657474, |
|
"loss": 5.8558, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.000934687953555878, |
|
"loss": 7.3071, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009332365747460088, |
|
"loss": 5.4414, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009317851959361394, |
|
"loss": 6.2957, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00093033381712627, |
|
"loss": 5.7204, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009288824383164007, |
|
"loss": 5.3751, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009274310595065311, |
|
"loss": 5.6019, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009259796806966619, |
|
"loss": 5.397, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009245283018867925, |
|
"loss": 5.7751, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009230769230769232, |
|
"loss": 5.6476, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009216255442670537, |
|
"loss": 5.2722, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009201741654571844, |
|
"loss": 5.7729, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000918722786647315, |
|
"loss": 5.6173, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009172714078374456, |
|
"loss": 5.51, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009158200290275763, |
|
"loss": 5.3482, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009143686502177069, |
|
"loss": 5.4823, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009129172714078375, |
|
"loss": 6.2471, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009114658925979681, |
|
"loss": 4.9672, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009100145137880988, |
|
"loss": 5.2456, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009085631349782293, |
|
"loss": 5.1968, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00090711175616836, |
|
"loss": 5.5594, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009056603773584906, |
|
"loss": 5.7127, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009042089985486213, |
|
"loss": 5.1732, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009027576197387518, |
|
"loss": 5.2284, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009013062409288825, |
|
"loss": 5.4143, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0008998548621190132, |
|
"loss": 5.3897, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0008984034833091437, |
|
"loss": 5.3026, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0008969521044992744, |
|
"loss": 5.3856, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.000895500725689405, |
|
"loss": 5.4547, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0008940493468795356, |
|
"loss": 5.4656, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008925979680696662, |
|
"loss": 5.5619, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008911465892597969, |
|
"loss": 5.1546, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008896952104499274, |
|
"loss": 5.3612, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008882438316400581, |
|
"loss": 5.4632, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008867924528301887, |
|
"loss": 5.452, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008853410740203193, |
|
"loss": 5.2672, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00088388969521045, |
|
"loss": 5.3376, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008824383164005806, |
|
"loss": 5.2473, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008809869375907113, |
|
"loss": 5.5358, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008795355587808418, |
|
"loss": 5.5731, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008780841799709725, |
|
"loss": 5.5165, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0008766328011611031, |
|
"loss": 5.4301, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008751814223512337, |
|
"loss": 5.4791, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008737300435413643, |
|
"loss": 5.4197, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000872278664731495, |
|
"loss": 5.2328, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008708272859216255, |
|
"loss": 5.0995, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008693759071117562, |
|
"loss": 5.7272, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008679245283018869, |
|
"loss": 5.3319, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008664731494920174, |
|
"loss": 5.0899, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008650217706821481, |
|
"loss": 5.0355, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008635703918722787, |
|
"loss": 5.2191, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008621190130624093, |
|
"loss": 6.9456, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008606676342525399, |
|
"loss": 5.5227, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0008592162554426706, |
|
"loss": 6.2959, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008577648766328012, |
|
"loss": 5.3511, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008563134978229318, |
|
"loss": 5.6864, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008548621190130624, |
|
"loss": 5.564, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008534107402031931, |
|
"loss": 5.2273, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008519593613933237, |
|
"loss": 5.4808, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008505079825834543, |
|
"loss": 5.5164, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.000849056603773585, |
|
"loss": 5.3247, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008476052249637155, |
|
"loss": 5.4717, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008461538461538462, |
|
"loss": 5.2743, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008447024673439768, |
|
"loss": 5.3684, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0008432510885341074, |
|
"loss": 5.5687, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.000841799709724238, |
|
"loss": 5.1055, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008403483309143687, |
|
"loss": 5.9791, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008388969521044993, |
|
"loss": 5.0076, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008374455732946299, |
|
"loss": 5.1948, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008359941944847606, |
|
"loss": 5.2781, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008345428156748912, |
|
"loss": 5.2203, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008330914368650218, |
|
"loss": 7.2738, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008316400580551524, |
|
"loss": 5.4513, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008301886792452831, |
|
"loss": 5.383, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008287373004354136, |
|
"loss": 5.0347, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008272859216255443, |
|
"loss": 5.0027, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008258345428156749, |
|
"loss": 5.8913, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0008243831640058055, |
|
"loss": 5.4649, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008229317851959361, |
|
"loss": 5.7076, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008214804063860668, |
|
"loss": 5.7467, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008200290275761973, |
|
"loss": 5.3819, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.000818577648766328, |
|
"loss": 4.9549, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008171262699564587, |
|
"loss": 5.2382, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008156748911465893, |
|
"loss": 5.5093, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008142235123367199, |
|
"loss": 5.3199, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008127721335268505, |
|
"loss": 5.271, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008113207547169812, |
|
"loss": 5.7689, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008098693759071117, |
|
"loss": 5.1918, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0008084179970972424, |
|
"loss": 5.5693, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.000806966618287373, |
|
"loss": 5.2135, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0008055152394775036, |
|
"loss": 4.9668, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0008040638606676342, |
|
"loss": 5.0565, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0008026124818577649, |
|
"loss": 5.2087, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0008011611030478955, |
|
"loss": 5.3371, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007997097242380261, |
|
"loss": 5.1342, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007982583454281568, |
|
"loss": 5.8708, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007968069666182874, |
|
"loss": 5.8123, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.000795355587808418, |
|
"loss": 5.3119, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007939042089985486, |
|
"loss": 5.3256, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007924528301886793, |
|
"loss": 5.4008, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007910014513788098, |
|
"loss": 4.8679, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0007895500725689405, |
|
"loss": 6.0724, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007880986937590711, |
|
"loss": 5.5273, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007866473149492017, |
|
"loss": 4.9108, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007851959361393324, |
|
"loss": 5.6022, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000783744557329463, |
|
"loss": 5.1818, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007822931785195936, |
|
"loss": 5.1802, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007808417997097242, |
|
"loss": 5.4167, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007793904208998549, |
|
"loss": 5.1972, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007779390420899854, |
|
"loss": 5.0685, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007764876632801161, |
|
"loss": 5.3508, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007750362844702467, |
|
"loss": 5.5123, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007735849056603774, |
|
"loss": 5.4158, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.0007721335268505079, |
|
"loss": 5.2287, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007706821480406386, |
|
"loss": 5.3385, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007692307692307693, |
|
"loss": 5.1604, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007677793904208998, |
|
"loss": 5.1538, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007663280116110305, |
|
"loss": 5.1191, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007648766328011611, |
|
"loss": 5.5619, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007634252539912917, |
|
"loss": 5.298, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007619738751814223, |
|
"loss": 5.2461, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.000760522496371553, |
|
"loss": 5.0993, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007590711175616835, |
|
"loss": 5.5692, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007576197387518142, |
|
"loss": 5.6514, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007561683599419448, |
|
"loss": 5.6489, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0007547169811320755, |
|
"loss": 5.1456, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007532656023222061, |
|
"loss": 5.2761, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007518142235123367, |
|
"loss": 5.1052, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007503628447024674, |
|
"loss": 5.1917, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007489114658925979, |
|
"loss": 5.0055, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007474600870827286, |
|
"loss": 5.3781, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007460087082728592, |
|
"loss": 5.3236, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007445573294629898, |
|
"loss": 5.1835, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007431059506531204, |
|
"loss": 5.3368, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007416545718432511, |
|
"loss": 4.9094, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007402031930333816, |
|
"loss": 4.8995, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0007387518142235123, |
|
"loss": 5.1664, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.000737300435413643, |
|
"loss": 5.0402, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007358490566037735, |
|
"loss": 5.5918, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007343976777939043, |
|
"loss": 5.3654, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007329462989840348, |
|
"loss": 5.1001, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007314949201741656, |
|
"loss": 5.1216, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.000730043541364296, |
|
"loss": 5.2851, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007285921625544268, |
|
"loss": 5.2563, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007271407837445574, |
|
"loss": 5.4761, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.000725689404934688, |
|
"loss": 5.2268, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007242380261248185, |
|
"loss": 5.3059, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007227866473149493, |
|
"loss": 5.8417, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0007213352685050797, |
|
"loss": 5.3375, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007198838896952105, |
|
"loss": 5.0948, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007184325108853412, |
|
"loss": 5.3722, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007169811320754717, |
|
"loss": 5.5119, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007155297532656024, |
|
"loss": 4.9806, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.000714078374455733, |
|
"loss": 5.9573, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007126269956458637, |
|
"loss": 5.9752, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007111756168359942, |
|
"loss": 4.982, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007097242380261249, |
|
"loss": 5.3352, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007082728592162555, |
|
"loss": 5.1336, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007068214804063861, |
|
"loss": 5.1104, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007053701015965167, |
|
"loss": 5.1668, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0007039187227866474, |
|
"loss": 5.2123, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.000702467343976778, |
|
"loss": 5.4632, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0007010159651669086, |
|
"loss": 5.2234, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006995645863570393, |
|
"loss": 6.1649, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006981132075471698, |
|
"loss": 4.9586, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006966618287373005, |
|
"loss": 5.3222, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006952104499274311, |
|
"loss": 5.4812, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006937590711175617, |
|
"loss": 5.0724, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006923076923076923, |
|
"loss": 5.1864, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.000690856313497823, |
|
"loss": 5.0248, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006894049346879536, |
|
"loss": 5.0635, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006879535558780842, |
|
"loss": 5.3964, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0006865021770682149, |
|
"loss": 5.0607, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006850507982583455, |
|
"loss": 5.0457, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006835994194484761, |
|
"loss": 5.0219, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006821480406386067, |
|
"loss": 4.9143, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006806966618287374, |
|
"loss": 5.4245, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006792452830188679, |
|
"loss": 4.8856, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006777939042089986, |
|
"loss": 5.1959, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006763425253991292, |
|
"loss": 5.9459, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006748911465892598, |
|
"loss": 4.961, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006734397677793904, |
|
"loss": 5.1239, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006719883889695211, |
|
"loss": 4.8281, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006705370101596517, |
|
"loss": 5.3037, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0006690856313497823, |
|
"loss": 5.4818, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.000667634252539913, |
|
"loss": 5.378, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006661828737300436, |
|
"loss": 4.9808, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006647314949201742, |
|
"loss": 5.0709, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006632801161103048, |
|
"loss": 5.2164, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006618287373004355, |
|
"loss": 4.9492, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.000660377358490566, |
|
"loss": 5.4681, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006589259796806967, |
|
"loss": 5.511, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006574746008708273, |
|
"loss": 5.1693, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006560232220609579, |
|
"loss": 5.0552, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006545718432510886, |
|
"loss": 5.2149, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006531204644412192, |
|
"loss": 6.3403, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0006516690856313498, |
|
"loss": 5.3862, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006502177068214804, |
|
"loss": 5.0721, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006487663280116111, |
|
"loss": 5.5026, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006473149492017417, |
|
"loss": 6.4226, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006458635703918723, |
|
"loss": 5.3436, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006444121915820029, |
|
"loss": 6.4222, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006429608127721336, |
|
"loss": 6.0071, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006415094339622641, |
|
"loss": 5.0389, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006400580551523948, |
|
"loss": 5.3047, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006386066763425255, |
|
"loss": 5.3622, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.000637155297532656, |
|
"loss": 5.2451, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006357039187227867, |
|
"loss": 4.9362, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0006342525399129173, |
|
"loss": 5.3065, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006328011611030479, |
|
"loss": 5.0147, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006313497822931785, |
|
"loss": 5.0598, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006298984034833092, |
|
"loss": 5.2496, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006284470246734397, |
|
"loss": 4.8978, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006269956458635704, |
|
"loss": 5.4418, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.000625544267053701, |
|
"loss": 4.8784, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006240928882438317, |
|
"loss": 5.0095, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006226415094339623, |
|
"loss": 5.0565, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006211901306240929, |
|
"loss": 5.0263, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006197387518142236, |
|
"loss": 5.1925, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006182873730043541, |
|
"loss": 4.9813, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0006168359941944848, |
|
"loss": 5.2589, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006153846153846154, |
|
"loss": 4.7593, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.000613933236574746, |
|
"loss": 5.015, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006124818577648766, |
|
"loss": 5.1436, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006110304789550073, |
|
"loss": 5.149, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006095791001451378, |
|
"loss": 5.1044, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006081277213352685, |
|
"loss": 4.9833, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006066763425253992, |
|
"loss": 5.2861, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006052249637155298, |
|
"loss": 5.3884, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006037735849056604, |
|
"loss": 4.8799, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.000602322206095791, |
|
"loss": 5.0057, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0006008708272859217, |
|
"loss": 5.2477, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0005994194484760522, |
|
"loss": 5.1023, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005979680696661829, |
|
"loss": 5.1842, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005965166908563135, |
|
"loss": 5.0125, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005950653120464441, |
|
"loss": 5.4608, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005936139332365747, |
|
"loss": 4.9112, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005921625544267054, |
|
"loss": 5.1474, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005907111756168359, |
|
"loss": 5.1718, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005892597968069666, |
|
"loss": 5.1529, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005878084179970973, |
|
"loss": 5.2616, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005863570391872278, |
|
"loss": 5.062, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005849056603773585, |
|
"loss": 5.0368, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005834542815674891, |
|
"loss": 5.0685, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0005820029027576198, |
|
"loss": 5.2184, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005805515239477503, |
|
"loss": 5.2071, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.000579100145137881, |
|
"loss": 5.3898, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005776487663280116, |
|
"loss": 5.2976, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005761973875181422, |
|
"loss": 5.167, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005747460087082728, |
|
"loss": 5.3693, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005732946298984035, |
|
"loss": 4.8185, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005718432510885341, |
|
"loss": 5.3253, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005703918722786647, |
|
"loss": 5.8208, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005689404934687954, |
|
"loss": 4.9468, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005674891146589259, |
|
"loss": 5.1776, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0005660377358490566, |
|
"loss": 4.9745, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005645863570391872, |
|
"loss": 5.0746, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005631349782293179, |
|
"loss": 5.2983, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005616835994194484, |
|
"loss": 4.86, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005602322206095791, |
|
"loss": 4.9945, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005587808417997097, |
|
"loss": 6.0606, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005573294629898403, |
|
"loss": 5.1517, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000555878084179971, |
|
"loss": 5.1314, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005544267053701016, |
|
"loss": 5.1679, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005529753265602322, |
|
"loss": 4.9893, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005515239477503628, |
|
"loss": 5.6067, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0005500725689404935, |
|
"loss": 4.9295, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000548621190130624, |
|
"loss": 5.3404, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005471698113207547, |
|
"loss": 5.0729, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005457184325108853, |
|
"loss": 5.0503, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005442670537010159, |
|
"loss": 5.0757, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005428156748911465, |
|
"loss": 4.7697, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005413642960812772, |
|
"loss": 4.819, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.000539912917271408, |
|
"loss": 5.089, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005384615384615384, |
|
"loss": 5.1247, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005370101596516691, |
|
"loss": 4.8385, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005355587808417997, |
|
"loss": 5.1001, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005341074020319303, |
|
"loss": 5.4633, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005326560232220609, |
|
"loss": 4.9786, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0005312046444121916, |
|
"loss": 5.1771, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005297532656023221, |
|
"loss": 5.0881, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005283018867924528, |
|
"loss": 5.0811, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005268505079825834, |
|
"loss": 4.9207, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000525399129172714, |
|
"loss": 5.0726, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005239477503628448, |
|
"loss": 5.1162, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005224963715529754, |
|
"loss": 5.4175, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005210449927431061, |
|
"loss": 5.0988, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005195936139332365, |
|
"loss": 5.1395, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005181422351233673, |
|
"loss": 5.0437, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005166908563134979, |
|
"loss": 5.0964, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0005152394775036285, |
|
"loss": 5.0094, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000513788098693759, |
|
"loss": 4.5872, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005123367198838898, |
|
"loss": 5.1621, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005108853410740202, |
|
"loss": 5.2346, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.000509433962264151, |
|
"loss": 5.0822, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005079825834542817, |
|
"loss": 5.3804, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005065312046444122, |
|
"loss": 5.6472, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005050798258345429, |
|
"loss": 5.3193, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005036284470246735, |
|
"loss": 4.9138, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005021770682148041, |
|
"loss": 4.9016, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0005007256894049347, |
|
"loss": 6.7137, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0004992743105950654, |
|
"loss": 4.6902, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.000497822931785196, |
|
"loss": 5.2458, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0004963715529753266, |
|
"loss": 5.153, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004949201741654572, |
|
"loss": 5.5571, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004934687953555878, |
|
"loss": 5.0553, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004920174165457185, |
|
"loss": 5.0873, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004905660377358491, |
|
"loss": 5.0853, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004891146589259797, |
|
"loss": 5.098, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004876632801161103, |
|
"loss": 5.0839, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004862119013062409, |
|
"loss": 5.0078, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004847605224963715, |
|
"loss": 4.7948, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004833091436865022, |
|
"loss": 5.0643, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0004818577648766328, |
|
"loss": 5.1501, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00048040638606676347, |
|
"loss": 4.67, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00047895500725689407, |
|
"loss": 4.9893, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00047750362844702467, |
|
"loss": 4.9115, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004760522496371553, |
|
"loss": 4.9451, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004746008708272859, |
|
"loss": 4.9671, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004731494920174166, |
|
"loss": 4.9126, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004716981132075472, |
|
"loss": 4.7534, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00047024673439767777, |
|
"loss": 4.9833, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004687953555878084, |
|
"loss": 5.1847, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.000467343976777939, |
|
"loss": 4.5659, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004658925979680697, |
|
"loss": 4.6605, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00046444121915820033, |
|
"loss": 4.7457, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00046298984034833093, |
|
"loss": 5.0637, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0004615384615384616, |
|
"loss": 4.951, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004600870827285922, |
|
"loss": 4.9564, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004586357039187228, |
|
"loss": 4.7754, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00045718432510885343, |
|
"loss": 4.7696, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00045573294629898403, |
|
"loss": 5.3184, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00045428156748911463, |
|
"loss": 4.9474, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004528301886792453, |
|
"loss": 5.0461, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004513788098693759, |
|
"loss": 4.7707, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004499274310595066, |
|
"loss": 5.0858, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004484760522496372, |
|
"loss": 4.939, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0004470246734397678, |
|
"loss": 4.9511, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00044557329462989844, |
|
"loss": 4.7977, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00044412191582002904, |
|
"loss": 5.0046, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00044267053701015964, |
|
"loss": 4.9981, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0004412191582002903, |
|
"loss": 4.8977, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0004397677793904209, |
|
"loss": 4.9723, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043831640058055154, |
|
"loss": 5.2849, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043686502177068214, |
|
"loss": 6.5805, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043541364296081274, |
|
"loss": 4.8901, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043396226415094345, |
|
"loss": 5.1268, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043251088534107404, |
|
"loss": 5.0544, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00043105950653120464, |
|
"loss": 4.8594, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0004296081277213353, |
|
"loss": 5.0497, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0004281567489114659, |
|
"loss": 4.9975, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00042670537010159655, |
|
"loss": 4.9852, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00042525399129172715, |
|
"loss": 5.1585, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00042380261248185774, |
|
"loss": 4.8233, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0004223512336719884, |
|
"loss": 4.9715, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.000420899854862119, |
|
"loss": 5.0533, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00041944847605224965, |
|
"loss": 5.0464, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0004179970972423803, |
|
"loss": 5.0582, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0004165457184325109, |
|
"loss": 4.9427, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00041509433962264155, |
|
"loss": 5.122, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00041364296081277215, |
|
"loss": 4.8487, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00041219158200290275, |
|
"loss": 5.4142, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0004107402031930334, |
|
"loss": 5.0128, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.000409288824383164, |
|
"loss": 5.1977, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00040783744557329466, |
|
"loss": 5.0896, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00040638606676342525, |
|
"loss": 4.8231, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00040493468795355585, |
|
"loss": 4.9305, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0004034833091436865, |
|
"loss": 4.9777, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0004020319303338171, |
|
"loss": 5.1488, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00040058055152394776, |
|
"loss": 4.821, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0003991291727140784, |
|
"loss": 4.8961, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.000397677793904209, |
|
"loss": 4.9929, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00039622641509433966, |
|
"loss": 5.1567, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00039477503628447026, |
|
"loss": 5.0701, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00039332365747460086, |
|
"loss": 4.827, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0003918722786647315, |
|
"loss": 5.2814, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0003904208998548621, |
|
"loss": 4.7432, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0003889695210449927, |
|
"loss": 4.9114, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00038751814223512336, |
|
"loss": 4.694, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00038606676342525396, |
|
"loss": 4.8725, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00038461538461538467, |
|
"loss": 5.5212, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00038316400580551527, |
|
"loss": 4.9127, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00038171262699564587, |
|
"loss": 4.7343, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0003802612481857765, |
|
"loss": 4.799, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0003788098693759071, |
|
"loss": 5.0624, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00037735849056603777, |
|
"loss": 5.5152, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00037590711175616837, |
|
"loss": 4.9362, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00037445573294629897, |
|
"loss": 4.8447, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003730043541364296, |
|
"loss": 5.0213, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003715529753265602, |
|
"loss": 4.9314, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003701015965166908, |
|
"loss": 5.0127, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003686502177068215, |
|
"loss": 5.0349, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003671988388969521, |
|
"loss": 4.9583, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003657474600870828, |
|
"loss": 4.9811, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003642960812772134, |
|
"loss": 4.8891, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.000362844702467344, |
|
"loss": 4.9644, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00036139332365747463, |
|
"loss": 4.5369, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00035994194484760523, |
|
"loss": 4.6659, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0003584905660377358, |
|
"loss": 5.2977, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0003570391872278665, |
|
"loss": 4.7141, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0003555878084179971, |
|
"loss": 4.8314, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00035413642960812773, |
|
"loss": 4.6193, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00035268505079825833, |
|
"loss": 4.8419, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.000351233671988389, |
|
"loss": 4.8189, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00034978229317851964, |
|
"loss": 4.8661, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00034833091436865023, |
|
"loss": 5.1865, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00034687953555878083, |
|
"loss": 4.8, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0003454281567489115, |
|
"loss": 4.6069, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0003439767779390421, |
|
"loss": 5.0117, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00034252539912917274, |
|
"loss": 5.0603, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00034107402031930334, |
|
"loss": 5.0904, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00033962264150943393, |
|
"loss": 4.9147, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003381712626995646, |
|
"loss": 5.0641, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003367198838896952, |
|
"loss": 4.8329, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00033526850507982584, |
|
"loss": 4.7659, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003338171262699565, |
|
"loss": 4.9337, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003323657474600871, |
|
"loss": 4.6835, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00033091436865021774, |
|
"loss": 5.1411, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00032946298984034834, |
|
"loss": 4.9891, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00032801161103047894, |
|
"loss": 4.7431, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003265602322206096, |
|
"loss": 5.0134, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0003251088534107402, |
|
"loss": 4.9053, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00032365747460087085, |
|
"loss": 4.6344, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00032220609579100144, |
|
"loss": 4.8781, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00032075471698113204, |
|
"loss": 4.7326, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00031930333817126275, |
|
"loss": 4.6855, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00031785195936139335, |
|
"loss": 4.9818, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00031640058055152395, |
|
"loss": 4.7883, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0003149492017416546, |
|
"loss": 4.9197, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0003134978229317852, |
|
"loss": 4.917, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00031204644412191585, |
|
"loss": 4.8078, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00031059506531204645, |
|
"loss": 4.7798, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00030914368650217705, |
|
"loss": 4.6256, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0003076923076923077, |
|
"loss": 4.9239, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0003062409288824383, |
|
"loss": 5.1256, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0003047895500725689, |
|
"loss": 5.2522, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0003033381712626996, |
|
"loss": 4.7731, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0003018867924528302, |
|
"loss": 4.9897, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00030043541364296086, |
|
"loss": 4.7127, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029898403483309146, |
|
"loss": 4.8191, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029753265602322206, |
|
"loss": 4.9202, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002960812772133527, |
|
"loss": 4.8658, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002946298984034833, |
|
"loss": 4.9547, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002931785195936139, |
|
"loss": 4.7892, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029172714078374456, |
|
"loss": 5.0276, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00029027576197387516, |
|
"loss": 4.7144, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002888243831640058, |
|
"loss": 4.6585, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002873730043541364, |
|
"loss": 4.8446, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00028592162554426706, |
|
"loss": 4.8927, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002844702467343977, |
|
"loss": 5.8035, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002830188679245283, |
|
"loss": 5.0722, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00028156748911465897, |
|
"loss": 4.7843, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00028011611030478957, |
|
"loss": 4.9298, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00027866473149492017, |
|
"loss": 5.0039, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_loss": 5.048862457275391, |
|
"eval_runtime": 760.3008, |
|
"eval_samples_per_second": 3.475, |
|
"eval_steps_per_second": 0.291, |
|
"eval_wer": 1.9565251884172947, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002772133526850508, |
|
"loss": 4.8261, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002757619738751814, |
|
"loss": 4.7969, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.000274310595065312, |
|
"loss": 4.7738, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00027285921625544267, |
|
"loss": 4.9683, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00027140783744557327, |
|
"loss": 4.9032, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.000269956458635704, |
|
"loss": 4.8847, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002685050798258346, |
|
"loss": 4.7633, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00026705370101596517, |
|
"loss": 4.7142, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002656023222060958, |
|
"loss": 4.8657, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002641509433962264, |
|
"loss": 4.7196, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.000262699564586357, |
|
"loss": 4.6712, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002612481857764877, |
|
"loss": 4.6983, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002597968069666183, |
|
"loss": 4.667, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002583454281567489, |
|
"loss": 4.659, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002568940493468795, |
|
"loss": 4.8124, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002554426705370101, |
|
"loss": 5.1725, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00025399129172714083, |
|
"loss": 4.6304, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00025253991291727143, |
|
"loss": 4.9142, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00025108853410740203, |
|
"loss": 5.0531, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002496371552975327, |
|
"loss": 4.648, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002481857764876633, |
|
"loss": 4.8026, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002467343976777939, |
|
"loss": 4.9464, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00024528301886792453, |
|
"loss": 5.1749, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00024383164005805516, |
|
"loss": 4.6092, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00024238026124818576, |
|
"loss": 4.744, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0002409288824383164, |
|
"loss": 4.7657, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00023947750362844704, |
|
"loss": 4.7509, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00023802612481857766, |
|
"loss": 4.7388, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002365747460087083, |
|
"loss": 4.6644, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00023512336719883889, |
|
"loss": 4.5836, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002336719883889695, |
|
"loss": 4.7619, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00023222060957910016, |
|
"loss": 4.6205, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002307692307692308, |
|
"loss": 5.0488, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002293178519593614, |
|
"loss": 4.5679, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00022786647314949202, |
|
"loss": 4.6562, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00022641509433962264, |
|
"loss": 4.6732, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002249637155297533, |
|
"loss": 4.6644, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002235123367198839, |
|
"loss": 4.8933, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00022206095791001452, |
|
"loss": 4.7343, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00022060957910014514, |
|
"loss": 4.6194, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021915820029027577, |
|
"loss": 4.8073, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021770682148040637, |
|
"loss": 5.0539, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021625544267053702, |
|
"loss": 4.7011, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021480406386066765, |
|
"loss": 4.8361, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021335268505079827, |
|
"loss": 5.0069, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00021190130624092887, |
|
"loss": 4.6283, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0002104499274310595, |
|
"loss": 4.7217, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00020899854862119015, |
|
"loss": 5.0962, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00020754716981132078, |
|
"loss": 5.3154, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00020609579100145138, |
|
"loss": 5.0557, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.000204644412191582, |
|
"loss": 4.9492, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00020319303338171263, |
|
"loss": 4.7266, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00020174165457184325, |
|
"loss": 4.742, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00020029027576197388, |
|
"loss": 4.863, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0001988388969521045, |
|
"loss": 4.845, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019738751814223513, |
|
"loss": 4.9859, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019593613933236576, |
|
"loss": 4.7998, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019448476052249636, |
|
"loss": 4.8476, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019303338171262698, |
|
"loss": 4.7775, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019158200290275763, |
|
"loss": 4.9301, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00019013062409288826, |
|
"loss": 4.8106, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018867924528301889, |
|
"loss": 4.6737, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018722786647314948, |
|
"loss": 4.9978, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0001857764876632801, |
|
"loss": 4.795, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00018432510885341076, |
|
"loss": 4.614, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001828737300435414, |
|
"loss": 4.5133, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.000181422351233672, |
|
"loss": 4.705, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017997097242380261, |
|
"loss": 4.6891, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017851959361393324, |
|
"loss": 4.6569, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017706821480406387, |
|
"loss": 4.8474, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001756168359941945, |
|
"loss": 4.5824, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017416545718432512, |
|
"loss": 4.7272, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017271407837445574, |
|
"loss": 4.3551, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00017126269956458637, |
|
"loss": 4.649, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00016981132075471697, |
|
"loss": 4.5383, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001683599419448476, |
|
"loss": 4.871, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016690856313497825, |
|
"loss": 5.0256, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016545718432510887, |
|
"loss": 4.8263, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016400580551523947, |
|
"loss": 4.7065, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001625544267053701, |
|
"loss": 4.6886, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00016110304789550072, |
|
"loss": 4.8443, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015965166908563138, |
|
"loss": 4.9113, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015820029027576197, |
|
"loss": 4.7974, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001567489114658926, |
|
"loss": 4.6697, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015529753265602323, |
|
"loss": 4.6998, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015384615384615385, |
|
"loss": 4.718, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015239477503628445, |
|
"loss": 4.655, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001509433962264151, |
|
"loss": 4.9084, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014949201741654573, |
|
"loss": 4.6914, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014804063860667635, |
|
"loss": 4.7079, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014658925979680695, |
|
"loss": 4.6175, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014513788098693758, |
|
"loss": 4.4339, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001436865021770682, |
|
"loss": 4.753, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014223512336719886, |
|
"loss": 4.8632, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00014078374455732948, |
|
"loss": 4.9026, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00013933236574746008, |
|
"loss": 5.2352, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001378809869375907, |
|
"loss": 4.7769, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00013642960812772133, |
|
"loss": 5.0062, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.000134978229317852, |
|
"loss": 5.2523, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00013352685050798259, |
|
"loss": 4.704, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001320754716981132, |
|
"loss": 5.3307, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00013062409288824384, |
|
"loss": 4.716, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012917271407837446, |
|
"loss": 4.8122, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012772133526850506, |
|
"loss": 4.6091, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012626995645863572, |
|
"loss": 4.6994, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012481857764876634, |
|
"loss": 4.5174, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012336719883889694, |
|
"loss": 5.0963, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00012191582002902758, |
|
"loss": 4.7831, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001204644412191582, |
|
"loss": 4.75, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00011901306240928883, |
|
"loss": 4.5889, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00011756168359941944, |
|
"loss": 4.6618, |
|
"step": 1111 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00011611030478955008, |
|
"loss": 4.959, |
|
"step": 1112 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0001146589259796807, |
|
"loss": 4.6879, |
|
"step": 1113 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011320754716981132, |
|
"loss": 4.4547, |
|
"step": 1114 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011175616835994195, |
|
"loss": 4.7914, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00011030478955007257, |
|
"loss": 4.864, |
|
"step": 1116 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010885341074020318, |
|
"loss": 4.9186, |
|
"step": 1117 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010740203193033382, |
|
"loss": 4.6017, |
|
"step": 1118 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010595065312046444, |
|
"loss": 4.3441, |
|
"step": 1119 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010449927431059508, |
|
"loss": 4.8269, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010304789550072569, |
|
"loss": 4.8003, |
|
"step": 1121 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010159651669085631, |
|
"loss": 4.908, |
|
"step": 1122 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00010014513788098694, |
|
"loss": 4.597, |
|
"step": 1123 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 9.869375907111757e-05, |
|
"loss": 4.7209, |
|
"step": 1124 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.724238026124818e-05, |
|
"loss": 5.189, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.579100145137882e-05, |
|
"loss": 4.6447, |
|
"step": 1126 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.433962264150944e-05, |
|
"loss": 4.7426, |
|
"step": 1127 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.288824383164006e-05, |
|
"loss": 4.6786, |
|
"step": 1128 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 9.14368650217707e-05, |
|
"loss": 4.7179, |
|
"step": 1129 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.998548621190131e-05, |
|
"loss": 4.8215, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.853410740203193e-05, |
|
"loss": 4.4143, |
|
"step": 1131 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.708272859216256e-05, |
|
"loss": 4.4619, |
|
"step": 1132 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.563134978229318e-05, |
|
"loss": 4.763, |
|
"step": 1133 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.41799709724238e-05, |
|
"loss": 4.586, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.272859216255444e-05, |
|
"loss": 4.9249, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.127721335268505e-05, |
|
"loss": 4.5555, |
|
"step": 1136 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.982583454281569e-05, |
|
"loss": 4.6658, |
|
"step": 1137 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.83744557329463e-05, |
|
"loss": 4.6832, |
|
"step": 1138 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.692307692307693e-05, |
|
"loss": 4.5276, |
|
"step": 1139 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.547169811320755e-05, |
|
"loss": 4.512, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.402031930333818e-05, |
|
"loss": 4.9081, |
|
"step": 1141 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.256894049346879e-05, |
|
"loss": 4.6692, |
|
"step": 1142 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 7.111756168359943e-05, |
|
"loss": 4.7582, |
|
"step": 1143 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 6.966618287373004e-05, |
|
"loss": 4.8991, |
|
"step": 1144 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 6.821480406386067e-05, |
|
"loss": 4.5162, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 6.676342525399129e-05, |
|
"loss": 4.9342, |
|
"step": 1146 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 6.531204644412192e-05, |
|
"loss": 6.1205, |
|
"step": 1147 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.386066763425253e-05, |
|
"loss": 4.6055, |
|
"step": 1148 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.240928882438317e-05, |
|
"loss": 5.1268, |
|
"step": 1149 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 6.095791001451379e-05, |
|
"loss": 4.7341, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.9506531204644415e-05, |
|
"loss": 4.5723, |
|
"step": 1151 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.805515239477504e-05, |
|
"loss": 4.4349, |
|
"step": 1152 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.660377358490566e-05, |
|
"loss": 4.6777, |
|
"step": 1153 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.5152394775036286e-05, |
|
"loss": 4.4656, |
|
"step": 1154 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.370101596516691e-05, |
|
"loss": 4.9724, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.224963715529754e-05, |
|
"loss": 4.7204, |
|
"step": 1156 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.079825834542816e-05, |
|
"loss": 4.7556, |
|
"step": 1157 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.934687953555878e-05, |
|
"loss": 4.6727, |
|
"step": 1158 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.789550072568941e-05, |
|
"loss": 4.4809, |
|
"step": 1159 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.644412191582003e-05, |
|
"loss": 4.708, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.4992743105950653e-05, |
|
"loss": 4.8091, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.354136429608128e-05, |
|
"loss": 5.0111, |
|
"step": 1162 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.20899854862119e-05, |
|
"loss": 4.823, |
|
"step": 1163 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.0638606676342524e-05, |
|
"loss": 4.5235, |
|
"step": 1164 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.918722786647315e-05, |
|
"loss": 4.8935, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.7735849056603776e-05, |
|
"loss": 4.8347, |
|
"step": 1166 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.6284470246734395e-05, |
|
"loss": 4.5507, |
|
"step": 1167 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.483309143686502e-05, |
|
"loss": 4.5014, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.3381712626995646e-05, |
|
"loss": 4.6697, |
|
"step": 1169 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.1930333817126266e-05, |
|
"loss": 4.5949, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.0478955007256895e-05, |
|
"loss": 4.6143, |
|
"step": 1171 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.902757619738752e-05, |
|
"loss": 4.4137, |
|
"step": 1172 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.7576197387518143e-05, |
|
"loss": 4.6779, |
|
"step": 1173 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.612481857764877e-05, |
|
"loss": 4.6309, |
|
"step": 1174 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.467343976777939e-05, |
|
"loss": 4.3231, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.3222060957910014e-05, |
|
"loss": 4.5942, |
|
"step": 1176 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.177068214804064e-05, |
|
"loss": 4.5917, |
|
"step": 1177 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.0319303338171262e-05, |
|
"loss": 5.0845, |
|
"step": 1178 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.8867924528301888e-05, |
|
"loss": 4.4308, |
|
"step": 1179 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.741654571843251e-05, |
|
"loss": 4.7573, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.5965166908563133e-05, |
|
"loss": 4.8455, |
|
"step": 1181 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.451378809869376e-05, |
|
"loss": 4.6741, |
|
"step": 1182 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.3062409288824384e-05, |
|
"loss": 4.5611, |
|
"step": 1183 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.1611030478955007e-05, |
|
"loss": 4.9078, |
|
"step": 1184 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.0159651669085631e-05, |
|
"loss": 4.7998, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.708272859216255e-06, |
|
"loss": 4.7906, |
|
"step": 1186 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 7.25689404934688e-06, |
|
"loss": 4.5753, |
|
"step": 1187 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 5.8055152394775034e-06, |
|
"loss": 4.7835, |
|
"step": 1188 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.354136429608128e-06, |
|
"loss": 4.9311, |
|
"step": 1189 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 1189, |
|
"total_flos": 0.0, |
|
"train_loss": 5.084408325743936, |
|
"train_runtime": 6545.2766, |
|
"train_samples_per_second": 4.36, |
|
"train_steps_per_second": 0.182 |
|
} |
|
], |
|
"max_steps": 1189, |
|
"num_train_epochs": 1, |
|
"total_flos": 0.0, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |