{
  "best_metric": 161.45617116397892,
  "best_model_checkpoint": "./checkpoint-88",
  "epoch": 2.3214285714285716,
  "global_step": 112,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0,
      "loss": 1.6569,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.2058823529411765e-07,
      "loss": 1.6564,
      "step": 2
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.411764705882353e-07,
      "loss": 1.6518,
      "step": 3
    },
    {
      "epoch": 0.04,
      "learning_rate": 6.61764705882353e-07,
      "loss": 1.6237,
      "step": 4
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.823529411764706e-07,
      "loss": 1.7141,
      "step": 5
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.1029411764705884e-06,
      "loss": 1.655,
      "step": 6
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.323529411764706e-06,
      "loss": 1.5904,
      "step": 7
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.5441176470588234e-06,
      "loss": 1.6232,
      "step": 8
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.7647058823529412e-06,
      "loss": 1.6403,
      "step": 9
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.985294117647059e-06,
      "loss": 1.6065,
      "step": 10
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.2058823529411767e-06,
      "loss": 1.5947,
      "step": 11
    },
    {
      "epoch": 0.1,
      "eval_loss": 1.629066824913025,
      "eval_runtime": 125.637,
      "eval_samples_per_second": 2.627,
      "eval_steps_per_second": 0.088,
      "eval_wer": 227.94188088775348,
      "step": 11
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.4264705882352943e-06,
      "loss": 1.52,
      "step": 12
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.647058823529412e-06,
      "loss": 1.4456,
      "step": 13
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.867647058823529e-06,
      "loss": 1.58,
      "step": 14
    },
    {
      "epoch": 0.13,
      "learning_rate": 3.088235294117647e-06,
      "loss": 1.5958,
      "step": 15
    },
    {
      "epoch": 0.14,
      "learning_rate": 3.308823529411765e-06,
      "loss": 1.4695,
      "step": 16
    },
    {
      "epoch": 0.15,
      "learning_rate": 3.5294117647058825e-06,
      "loss": 1.4718,
      "step": 17
    },
    {
      "epoch": 0.16,
      "learning_rate": 3.75e-06,
      "loss": 1.4406,
      "step": 18
    },
    {
      "epoch": 0.17,
      "learning_rate": 3.970588235294118e-06,
      "loss": 1.4339,
      "step": 19
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.191176470588236e-06,
      "loss": 1.313,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.2825,
      "step": 21
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.632352941176471e-06,
      "loss": 1.3194,
      "step": 22
    },
    {
      "epoch": 0.2,
      "eval_loss": 1.460469126701355,
      "eval_runtime": 118.4642,
      "eval_samples_per_second": 2.786,
      "eval_steps_per_second": 0.093,
      "eval_wer": 235.94124221619032,
      "step": 22
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.852941176470589e-06,
      "loss": 1.2301,
      "step": 23
    },
    {
      "epoch": 0.21,
      "learning_rate": 5.073529411764706e-06,
      "loss": 1.3231,
      "step": 24
    },
    {
      "epoch": 0.22,
      "learning_rate": 5.294117647058824e-06,
      "loss": 1.2689,
      "step": 25
    },
    {
      "epoch": 0.23,
      "learning_rate": 5.5147058823529415e-06,
      "loss": 1.2768,
      "step": 26
    },
    {
      "epoch": 0.24,
      "learning_rate": 5.735294117647058e-06,
      "loss": 1.2927,
      "step": 27
    },
    {
      "epoch": 0.25,
      "learning_rate": 5.955882352941176e-06,
      "loss": 1.3419,
      "step": 28
    },
    {
      "epoch": 0.26,
      "learning_rate": 6.176470588235294e-06,
      "loss": 1.141,
      "step": 29
    },
    {
      "epoch": 0.27,
      "learning_rate": 6.397058823529412e-06,
      "loss": 1.2907,
      "step": 30
    },
    {
      "epoch": 0.28,
      "learning_rate": 6.61764705882353e-06,
      "loss": 1.2248,
      "step": 31
    },
    {
      "epoch": 0.29,
      "learning_rate": 6.838235294117647e-06,
      "loss": 1.149,
      "step": 32
    },
    {
      "epoch": 0.29,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.2636,
      "step": 33
    },
    {
      "epoch": 0.29,
      "eval_loss": 1.3149287700653076,
      "eval_runtime": 115.745,
      "eval_samples_per_second": 2.851,
      "eval_steps_per_second": 0.095,
      "eval_wer": 177.19942519559316,
      "step": 33
    },
    {
      "epoch": 0.3,
      "learning_rate": 7.2794117647058826e-06,
      "loss": 1.2351,
      "step": 34
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.5e-06,
      "loss": 1.2055,
      "step": 35
    },
    {
      "epoch": 0.32,
      "learning_rate": 7.403846153846155e-06,
      "loss": 1.1864,
      "step": 36
    },
    {
      "epoch": 0.33,
      "learning_rate": 7.307692307692308e-06,
      "loss": 1.148,
      "step": 37
    },
    {
      "epoch": 0.34,
      "learning_rate": 7.211538461538462e-06,
      "loss": 1.1505,
      "step": 38
    },
    {
      "epoch": 1.01,
      "learning_rate": 7.115384615384615e-06,
      "loss": 1.0859,
      "step": 39
    },
    {
      "epoch": 1.02,
      "learning_rate": 7.01923076923077e-06,
      "loss": 1.119,
      "step": 40
    },
    {
      "epoch": 1.03,
      "learning_rate": 6.923076923076923e-06,
      "loss": 1.0238,
      "step": 41
    },
    {
      "epoch": 1.04,
      "learning_rate": 6.826923076923077e-06,
      "loss": 1.0468,
      "step": 42
    },
    {
      "epoch": 1.04,
      "learning_rate": 6.730769230769231e-06,
      "loss": 1.061,
      "step": 43
    },
    {
      "epoch": 1.05,
      "learning_rate": 6.6346153846153846e-06,
      "loss": 1.0238,
      "step": 44
    },
    {
      "epoch": 1.05,
      "eval_loss": 1.1836069822311401,
      "eval_runtime": 109.1507,
      "eval_samples_per_second": 3.023,
      "eval_steps_per_second": 0.101,
      "eval_wer": 199.80839853105542,
      "step": 44
    },
    {
      "epoch": 1.06,
      "learning_rate": 6.538461538461539e-06,
      "loss": 1.0016,
      "step": 45
    },
    {
      "epoch": 1.07,
      "learning_rate": 6.442307692307692e-06,
      "loss": 0.9635,
      "step": 46
    },
    {
      "epoch": 1.08,
      "learning_rate": 6.3461538461538466e-06,
      "loss": 0.9953,
      "step": 47
    },
    {
      "epoch": 1.09,
      "learning_rate": 6.25e-06,
      "loss": 1.0061,
      "step": 48
    },
    {
      "epoch": 1.1,
      "learning_rate": 6.153846153846154e-06,
      "loss": 0.9933,
      "step": 49
    },
    {
      "epoch": 1.11,
      "learning_rate": 6.057692307692308e-06,
      "loss": 0.9479,
      "step": 50
    },
    {
      "epoch": 1.12,
      "learning_rate": 5.9615384615384615e-06,
      "loss": 0.9132,
      "step": 51
    },
    {
      "epoch": 1.12,
      "learning_rate": 5.865384615384615e-06,
      "loss": 1.0232,
      "step": 52
    },
    {
      "epoch": 1.13,
      "learning_rate": 5.76923076923077e-06,
      "loss": 1.0153,
      "step": 53
    },
    {
      "epoch": 1.14,
      "learning_rate": 5.673076923076923e-06,
      "loss": 0.9548,
      "step": 54
    },
    {
      "epoch": 1.15,
      "learning_rate": 5.576923076923077e-06,
      "loss": 0.9306,
      "step": 55
    },
    {
      "epoch": 1.15,
      "eval_loss": 1.1363615989685059,
      "eval_runtime": 115.8861,
      "eval_samples_per_second": 2.848,
      "eval_steps_per_second": 0.095,
      "eval_wer": 210.21874501037843,
      "step": 55
    },
    {
      "epoch": 1.16,
      "learning_rate": 5.480769230769231e-06,
      "loss": 0.9088,
      "step": 56
    },
    {
      "epoch": 1.17,
      "learning_rate": 5.384615384615385e-06,
      "loss": 0.8902,
      "step": 57
    },
    {
      "epoch": 1.18,
      "learning_rate": 5.288461538461539e-06,
      "loss": 0.861,
      "step": 58
    },
    {
      "epoch": 1.19,
      "learning_rate": 5.192307692307692e-06,
      "loss": 0.8047,
      "step": 59
    },
    {
      "epoch": 1.2,
      "learning_rate": 5.096153846153847e-06,
      "loss": 0.8459,
      "step": 60
    },
    {
      "epoch": 1.21,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 0.8103,
      "step": 61
    },
    {
      "epoch": 1.21,
      "learning_rate": 4.903846153846154e-06,
      "loss": 0.8752,
      "step": 62
    },
    {
      "epoch": 1.22,
      "learning_rate": 4.807692307692308e-06,
      "loss": 0.8398,
      "step": 63
    },
    {
      "epoch": 1.23,
      "learning_rate": 4.711538461538462e-06,
      "loss": 0.8571,
      "step": 64
    },
    {
      "epoch": 1.24,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.8998,
      "step": 65
    },
    {
      "epoch": 1.25,
      "learning_rate": 4.519230769230769e-06,
      "loss": 0.9233,
      "step": 66
    },
    {
      "epoch": 1.25,
      "eval_loss": 1.1090655326843262,
      "eval_runtime": 104.9772,
      "eval_samples_per_second": 3.144,
      "eval_steps_per_second": 0.105,
      "eval_wer": 175.20357656075362,
      "step": 66
    },
    {
      "epoch": 1.26,
      "learning_rate": 4.423076923076924e-06,
      "loss": 0.7878,
      "step": 67
    },
    {
      "epoch": 1.27,
      "learning_rate": 4.3269230769230765e-06,
      "loss": 0.8948,
      "step": 68
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.230769230769231e-06,
      "loss": 0.8711,
      "step": 69
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.134615384615385e-06,
      "loss": 0.8292,
      "step": 70
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.0384615384615385e-06,
      "loss": 0.9318,
      "step": 71
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.942307692307692e-06,
      "loss": 0.9272,
      "step": 72
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.846153846153846e-06,
      "loss": 0.8923,
      "step": 73
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.75e-06,
      "loss": 0.9013,
      "step": 74
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.653846153846154e-06,
      "loss": 0.8659,
      "step": 75
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.5576923076923075e-06,
      "loss": 0.8422,
      "step": 76
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.4615384615384617e-06,
      "loss": 0.8482,
      "step": 77
    },
    {
      "epoch": 2.01,
      "eval_loss": 1.0900639295578003,
      "eval_runtime": 99.705,
      "eval_samples_per_second": 3.31,
      "eval_steps_per_second": 0.11,
      "eval_wer": 161.85534089094685,
      "step": 77
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.3653846153846154e-06,
      "loss": 0.8925,
      "step": 78
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.2692307692307696e-06,
      "loss": 0.8202,
      "step": 79
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.1730769230769233e-06,
      "loss": 0.8384,
      "step": 80
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.076923076923077e-06,
      "loss": 0.8469,
      "step": 81
    },
    {
      "epoch": 2.05,
      "learning_rate": 2.9807692307692307e-06,
      "loss": 0.8172,
      "step": 82
    },
    {
      "epoch": 2.06,
      "learning_rate": 2.884615384615385e-06,
      "loss": 0.8119,
      "step": 83
    },
    {
      "epoch": 2.07,
      "learning_rate": 2.7884615384615386e-06,
      "loss": 0.7811,
      "step": 84
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.6923076923076923e-06,
      "loss": 0.819,
      "step": 85
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.596153846153846e-06,
      "loss": 0.8324,
      "step": 86
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.4999999999999998e-06,
      "loss": 0.8336,
      "step": 87
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.403846153846154e-06,
      "loss": 0.7929,
      "step": 88
    },
    {
      "epoch": 2.11,
      "eval_loss": 1.0782241821289062,
      "eval_runtime": 111.1236,
      "eval_samples_per_second": 2.97,
      "eval_steps_per_second": 0.099,
      "eval_wer": 161.45617116397892,
      "step": 88
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.307692307692308e-06,
      "loss": 0.774,
      "step": 89
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.211538461538462e-06,
      "loss": 0.8693,
      "step": 90
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.1153846153846155e-06,
      "loss": 0.8628,
      "step": 91
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.0192307692307692e-06,
      "loss": 0.8199,
      "step": 92
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.923076923076923e-06,
      "loss": 0.7995,
      "step": 93
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.826923076923077e-06,
      "loss": 0.7824,
      "step": 94
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.7307692307692308e-06,
      "loss": 0.7694,
      "step": 95
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.6346153846153848e-06,
      "loss": 0.7526,
      "step": 96
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.5384615384615385e-06,
      "loss": 0.7037,
      "step": 97
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.4423076923076924e-06,
      "loss": 0.7366,
      "step": 98
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3461538461538462e-06,
      "loss": 0.7134,
      "step": 99
    },
    {
      "epoch": 2.21,
      "eval_loss": 1.0737253427505493,
      "eval_runtime": 96.9018,
      "eval_samples_per_second": 3.406,
      "eval_steps_per_second": 0.114,
      "eval_wer": 177.1195912501996,
      "step": 99
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.2499999999999999e-06,
      "loss": 0.7738,
      "step": 100
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.153846153846154e-06,
      "loss": 0.7373,
      "step": 101
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.0576923076923078e-06,
      "loss": 0.7623,
      "step": 102
    },
    {
      "epoch": 2.24,
      "learning_rate": 9.615384615384615e-07,
      "loss": 0.815,
      "step": 103
    },
    {
      "epoch": 2.25,
      "learning_rate": 8.653846153846154e-07,
      "loss": 0.829,
      "step": 104
    },
    {
      "epoch": 2.26,
      "learning_rate": 7.692307692307693e-07,
      "loss": 0.7113,
      "step": 105
    },
    {
      "epoch": 2.27,
      "learning_rate": 6.730769230769231e-07,
      "loss": 0.8106,
      "step": 106
    },
    {
      "epoch": 2.28,
      "learning_rate": 5.76923076923077e-07,
      "loss": 0.7909,
      "step": 107
    },
    {
      "epoch": 2.29,
      "learning_rate": 4.807692307692307e-07,
      "loss": 0.7564,
      "step": 108
    },
    {
      "epoch": 2.29,
      "learning_rate": 3.8461538461538463e-07,
      "loss": 0.8574,
      "step": 109
    },
    {
      "epoch": 2.3,
      "learning_rate": 2.884615384615385e-07,
      "loss": 0.8543,
      "step": 110
    },
    {
      "epoch": 2.3,
      "eval_loss": 1.0717276334762573,
      "eval_runtime": 95.923,
      "eval_samples_per_second": 3.44,
      "eval_steps_per_second": 0.115,
      "eval_wer": 187.5459045186013,
      "step": 110
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.9230769230769231e-07,
      "loss": 0.824,
      "step": 111
    },
    {
      "epoch": 2.32,
      "learning_rate": 9.615384615384616e-08,
      "loss": 0.8352,
      "step": 112
    },
    {
      "epoch": 2.32,
      "step": 112,
      "total_flos": 1.7415399333888e+17,
      "train_loss": 1.0522634956453527,
      "train_runtime": 1735.2585,
      "train_samples_per_second": 4.131,
      "train_steps_per_second": 0.065
    }
  ],
  "max_steps": 112,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 1.7415399333888e+17,
  "trial_name": null,
  "trial_params": null
}