{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 1,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 4.84375,
      "learning_rate": 2.5e-05,
      "loss": 0.7521,
      "step": 1
    },
    {
      "epoch": 0.14285714285714285,
      "eval_accuracy": 0.4772727272727273,
      "eval_loss": 0.7755075097084045,
      "eval_runtime": 1.5538,
      "eval_samples_per_second": 84.955,
      "eval_steps_per_second": 2.574,
      "step": 1
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 4.5,
      "learning_rate": 5e-05,
      "loss": 0.7634,
      "step": 2
    },
    {
      "epoch": 0.2857142857142857,
      "eval_accuracy": 0.44696969696969696,
      "eval_loss": 0.780468761920929,
      "eval_runtime": 1.5472,
      "eval_samples_per_second": 85.314,
      "eval_steps_per_second": 2.585,
      "step": 2
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 8.5,
      "learning_rate": 4.9264705882352944e-05,
      "loss": 0.7984,
      "step": 3
    },
    {
      "epoch": 0.42857142857142855,
      "eval_accuracy": 0.49242424242424243,
      "eval_loss": 0.7474180459976196,
      "eval_runtime": 1.5385,
      "eval_samples_per_second": 85.797,
      "eval_steps_per_second": 2.6,
      "step": 3
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 2.03125,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 0.693,
      "step": 4
    },
    {
      "epoch": 0.5714285714285714,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.7217329740524292,
      "eval_runtime": 1.5493,
      "eval_samples_per_second": 85.202,
      "eval_steps_per_second": 2.582,
      "step": 4
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 29.75,
      "learning_rate": 4.7794117647058826e-05,
      "loss": 0.7846,
      "step": 5
    },
    {
      "epoch": 0.7142857142857143,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.7153113484382629,
      "eval_runtime": 1.5441,
      "eval_samples_per_second": 85.484,
      "eval_steps_per_second": 2.59,
      "step": 5
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 17.625,
      "learning_rate": 4.705882352941177e-05,
      "loss": 0.7596,
      "step": 6
    },
    {
      "epoch": 0.8571428571428571,
      "eval_accuracy": 0.5757575757575758,
      "eval_loss": 0.7194039821624756,
      "eval_runtime": 1.5001,
      "eval_samples_per_second": 87.995,
      "eval_steps_per_second": 2.667,
      "step": 6
    },
    {
      "epoch": 1.0,
      "grad_norm": 21.375,
      "learning_rate": 4.632352941176471e-05,
      "loss": 0.7446,
      "step": 7
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.4393939393939394,
      "eval_loss": 0.7496892809867859,
      "eval_runtime": 1.5456,
      "eval_samples_per_second": 85.405,
      "eval_steps_per_second": 2.588,
      "step": 7
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 23.25,
      "learning_rate": 4.558823529411765e-05,
      "loss": 0.7586,
      "step": 8
    },
    {
      "epoch": 1.1428571428571428,
      "eval_accuracy": 0.3484848484848485,
      "eval_loss": 0.8451675176620483,
      "eval_runtime": 1.5464,
      "eval_samples_per_second": 85.362,
      "eval_steps_per_second": 2.587,
      "step": 8
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 15.8125,
      "learning_rate": 4.485294117647059e-05,
      "loss": 0.7477,
      "step": 9
    },
    {
      "epoch": 1.2857142857142856,
      "eval_accuracy": 0.38636363636363635,
      "eval_loss": 0.8977465033531189,
      "eval_runtime": 1.4985,
      "eval_samples_per_second": 88.087,
      "eval_steps_per_second": 2.669,
      "step": 9
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 37.5,
      "learning_rate": 4.411764705882353e-05,
      "loss": 0.7741,
      "step": 10
    },
    {
      "epoch": 1.4285714285714286,
      "eval_accuracy": 0.38636363636363635,
      "eval_loss": 0.9048103094100952,
      "eval_runtime": 1.4975,
      "eval_samples_per_second": 88.149,
      "eval_steps_per_second": 2.671,
      "step": 10
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 42.75,
      "learning_rate": 4.3382352941176474e-05,
      "loss": 0.8717,
      "step": 11
    },
    {
      "epoch": 1.5714285714285714,
      "eval_accuracy": 0.3712121212121212,
      "eval_loss": 0.8705492615699768,
      "eval_runtime": 1.4992,
      "eval_samples_per_second": 88.05,
      "eval_steps_per_second": 2.668,
      "step": 11
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 31.375,
      "learning_rate": 4.2647058823529415e-05,
      "loss": 0.7632,
      "step": 12
    },
    {
      "epoch": 1.7142857142857144,
      "eval_accuracy": 0.3484848484848485,
      "eval_loss": 0.8101148009300232,
      "eval_runtime": 1.5025,
      "eval_samples_per_second": 87.855,
      "eval_steps_per_second": 2.662,
      "step": 12
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 25.375,
      "learning_rate": 4.1911764705882356e-05,
      "loss": 0.7274,
      "step": 13
    },
    {
      "epoch": 1.8571428571428572,
      "eval_accuracy": 0.45454545454545453,
      "eval_loss": 0.7472152709960938,
      "eval_runtime": 1.5592,
      "eval_samples_per_second": 84.66,
      "eval_steps_per_second": 2.565,
      "step": 13
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.28125,
      "learning_rate": 4.11764705882353e-05,
      "loss": 0.7485,
      "step": 14
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.7126479744911194,
      "eval_runtime": 1.5453,
      "eval_samples_per_second": 85.419,
      "eval_steps_per_second": 2.588,
      "step": 14
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 4.21875,
      "learning_rate": 4.044117647058824e-05,
      "loss": 0.7405,
      "step": 15
    },
    {
      "epoch": 2.142857142857143,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.6938949227333069,
      "eval_runtime": 1.4934,
      "eval_samples_per_second": 88.387,
      "eval_steps_per_second": 2.678,
      "step": 15
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 9.25,
      "learning_rate": 3.970588235294117e-05,
      "loss": 0.6494,
      "step": 16
    },
    {
      "epoch": 2.2857142857142856,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.6936286687850952,
      "eval_runtime": 1.4983,
      "eval_samples_per_second": 88.098,
      "eval_steps_per_second": 2.67,
      "step": 16
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 41.75,
      "learning_rate": 3.897058823529412e-05,
      "loss": 0.8244,
      "step": 17
    },
    {
      "epoch": 2.4285714285714284,
      "eval_accuracy": 0.6136363636363636,
      "eval_loss": 0.6947872042655945,
      "eval_runtime": 1.621,
      "eval_samples_per_second": 81.432,
      "eval_steps_per_second": 2.468,
      "step": 17
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 35.75,
      "learning_rate": 3.8235294117647055e-05,
      "loss": 0.8132,
      "step": 18
    },
    {
      "epoch": 2.571428571428571,
      "eval_accuracy": 0.6136363636363636,
      "eval_loss": 0.693770706653595,
      "eval_runtime": 1.549,
      "eval_samples_per_second": 85.214,
      "eval_steps_per_second": 2.582,
      "step": 18
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 13.25,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.6802,
      "step": 19
    },
    {
      "epoch": 2.7142857142857144,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 0.6894235610961914,
      "eval_runtime": 1.5446,
      "eval_samples_per_second": 85.458,
      "eval_steps_per_second": 2.59,
      "step": 19
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 22.0,
      "learning_rate": 3.6764705882352945e-05,
      "loss": 0.7468,
      "step": 20
    },
    {
      "epoch": 2.857142857142857,
      "eval_accuracy": 0.5909090909090909,
      "eval_loss": 0.6930723190307617,
      "eval_runtime": 1.5465,
      "eval_samples_per_second": 85.352,
      "eval_steps_per_second": 2.586,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 16.875,
      "learning_rate": 3.6029411764705886e-05,
      "loss": 0.7489,
      "step": 21
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5151515151515151,
      "eval_loss": 0.7048709988594055,
      "eval_runtime": 1.5504,
      "eval_samples_per_second": 85.14,
      "eval_steps_per_second": 2.58,
      "step": 21
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 1.578125,
      "learning_rate": 3.529411764705883e-05,
      "loss": 0.7081,
      "step": 22
    },
    {
      "epoch": 3.142857142857143,
      "eval_accuracy": 0.4696969696969697,
      "eval_loss": 0.7239050269126892,
      "eval_runtime": 1.5375,
      "eval_samples_per_second": 85.853,
      "eval_steps_per_second": 2.602,
      "step": 22
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 3.46875,
      "learning_rate": 3.455882352941177e-05,
      "loss": 0.7186,
      "step": 23
    },
    {
      "epoch": 3.2857142857142856,
      "eval_accuracy": 0.4318181818181818,
      "eval_loss": 0.7504349946975708,
      "eval_runtime": 1.5395,
      "eval_samples_per_second": 85.744,
      "eval_steps_per_second": 2.598,
      "step": 23
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 15.9375,
      "learning_rate": 3.382352941176471e-05,
      "loss": 0.7119,
      "step": 24
    },
    {
      "epoch": 3.4285714285714284,
      "eval_accuracy": 0.4090909090909091,
      "eval_loss": 0.7637221813201904,
      "eval_runtime": 1.4882,
      "eval_samples_per_second": 88.699,
      "eval_steps_per_second": 2.688,
      "step": 24
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 22.5,
      "learning_rate": 3.308823529411765e-05,
      "loss": 0.76,
      "step": 25
    },
    {
      "epoch": 3.571428571428571,
      "eval_accuracy": 0.3939393939393939,
      "eval_loss": 0.7662701606750488,
      "eval_runtime": 1.5414,
      "eval_samples_per_second": 85.634,
      "eval_steps_per_second": 2.595,
      "step": 25
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 5.34375,
      "learning_rate": 3.235294117647059e-05,
      "loss": 0.6997,
      "step": 26
    },
    {
      "epoch": 3.7142857142857144,
      "eval_accuracy": 0.4015151515151515,
      "eval_loss": 0.7559452056884766,
      "eval_runtime": 1.4899,
      "eval_samples_per_second": 88.598,
      "eval_steps_per_second": 2.685,
      "step": 26
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 19.0,
      "learning_rate": 3.161764705882353e-05,
      "loss": 0.682,
      "step": 27
    },
    {
      "epoch": 3.857142857142857,
      "eval_accuracy": 0.45454545454545453,
      "eval_loss": 0.7377100586891174,
      "eval_runtime": 1.5417,
      "eval_samples_per_second": 85.62,
      "eval_steps_per_second": 2.595,
      "step": 27
    },
    {
      "epoch": 4.0,
      "grad_norm": 16.875,
      "learning_rate": 3.0882352941176475e-05,
      "loss": 0.7146,
      "step": 28
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.4772727272727273,
      "eval_loss": 0.7146986722946167,
      "eval_runtime": 1.5411,
      "eval_samples_per_second": 85.651,
      "eval_steps_per_second": 2.595,
      "step": 28
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 9.3125,
      "learning_rate": 3.0147058823529413e-05,
      "loss": 0.6812,
      "step": 29
    },
    {
      "epoch": 4.142857142857143,
      "eval_accuracy": 0.5984848484848485,
      "eval_loss": 0.6949219107627869,
      "eval_runtime": 1.5427,
      "eval_samples_per_second": 85.566,
      "eval_steps_per_second": 2.593,
      "step": 29
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 9.6875,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.6863,
      "step": 30
    },
    {
      "epoch": 4.285714285714286,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 0.6819572448730469,
      "eval_runtime": 1.5428,
      "eval_samples_per_second": 85.56,
      "eval_steps_per_second": 2.593,
      "step": 30
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 19.375,
      "learning_rate": 2.8676470588235295e-05,
      "loss": 0.7525,
      "step": 31
    },
    {
      "epoch": 4.428571428571429,
      "eval_accuracy": 0.6136363636363636,
      "eval_loss": 0.6823641657829285,
      "eval_runtime": 1.542,
      "eval_samples_per_second": 85.606,
      "eval_steps_per_second": 2.594,
      "step": 31
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 19.75,
      "learning_rate": 2.7941176470588236e-05,
      "loss": 0.7368,
      "step": 32
    },
    {
      "epoch": 4.571428571428571,
      "eval_accuracy": 0.6136363636363636,
      "eval_loss": 0.6805161237716675,
      "eval_runtime": 1.5383,
      "eval_samples_per_second": 85.811,
      "eval_steps_per_second": 2.6,
      "step": 32
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 22.375,
      "learning_rate": 2.7205882352941174e-05,
      "loss": 0.7341,
      "step": 33
    },
    {
      "epoch": 4.714285714285714,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 0.678117573261261,
      "eval_runtime": 1.5397,
      "eval_samples_per_second": 85.729,
      "eval_steps_per_second": 2.598,
      "step": 33
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 30.0,
      "learning_rate": 2.647058823529412e-05,
      "loss": 0.7484,
      "step": 34
    },
    {
      "epoch": 4.857142857142857,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.6792346835136414,
      "eval_runtime": 1.5404,
      "eval_samples_per_second": 85.689,
      "eval_steps_per_second": 2.597,
      "step": 34
    },
    {
      "epoch": 5.0,
      "grad_norm": 12.8125,
      "learning_rate": 2.5735294117647057e-05,
      "loss": 0.6807,
      "step": 35
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.6793352961540222,
      "eval_runtime": 1.5392,
      "eval_samples_per_second": 85.758,
      "eval_steps_per_second": 2.599,
      "step": 35
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 14.5,
      "learning_rate": 2.5e-05,
      "loss": 0.7071,
      "step": 36
    },
    {
      "epoch": 5.142857142857143,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 0.683883786201477,
      "eval_runtime": 1.5387,
      "eval_samples_per_second": 85.787,
      "eval_steps_per_second": 2.6,
      "step": 36
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 19.875,
      "learning_rate": 2.4264705882352942e-05,
      "loss": 0.749,
      "step": 37
    },
    {
      "epoch": 5.285714285714286,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6915483474731445,
      "eval_runtime": 1.5438,
      "eval_samples_per_second": 85.504,
      "eval_steps_per_second": 2.591,
      "step": 37
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 6.0,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 0.6873,
      "step": 38
    },
    {
      "epoch": 5.428571428571429,
      "eval_accuracy": 0.5151515151515151,
      "eval_loss": 0.7027018070220947,
      "eval_runtime": 1.5427,
      "eval_samples_per_second": 85.565,
      "eval_steps_per_second": 2.593,
      "step": 38
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 2.484375,
      "learning_rate": 2.2794117647058825e-05,
      "loss": 0.6637,
      "step": 39
    },
    {
      "epoch": 5.571428571428571,
      "eval_accuracy": 0.45454545454545453,
      "eval_loss": 0.7172613739967346,
      "eval_runtime": 1.5421,
      "eval_samples_per_second": 85.598,
      "eval_steps_per_second": 2.594,
      "step": 39
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 7.21875,
      "learning_rate": 2.2058823529411766e-05,
      "loss": 0.7068,
      "step": 40
    },
    {
      "epoch": 5.714285714285714,
      "eval_accuracy": 0.42424242424242425,
      "eval_loss": 0.7289565205574036,
      "eval_runtime": 1.54,
      "eval_samples_per_second": 85.717,
      "eval_steps_per_second": 2.597,
      "step": 40
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 11.5625,
      "learning_rate": 2.1323529411764707e-05,
      "loss": 0.6884,
      "step": 41
    },
    {
      "epoch": 5.857142857142857,
      "eval_accuracy": 0.4166666666666667,
      "eval_loss": 0.7307764887809753,
      "eval_runtime": 1.4931,
      "eval_samples_per_second": 88.406,
      "eval_steps_per_second": 2.679,
      "step": 41
    },
    {
      "epoch": 6.0,
      "grad_norm": 15.9375,
      "learning_rate": 2.058823529411765e-05,
      "loss": 0.7043,
      "step": 42
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.4318181818181818,
      "eval_loss": 0.7282403707504272,
      "eval_runtime": 1.541,
      "eval_samples_per_second": 85.658,
      "eval_steps_per_second": 2.596,
      "step": 42
    },
    {
      "epoch": 6.142857142857143,
      "grad_norm": 16.375,
      "learning_rate": 1.9852941176470586e-05,
      "loss": 0.7056,
      "step": 43
    },
    {
      "epoch": 6.142857142857143,
      "eval_accuracy": 0.4318181818181818,
      "eval_loss": 0.7240825891494751,
      "eval_runtime": 1.5424,
      "eval_samples_per_second": 85.58,
      "eval_steps_per_second": 2.593,
      "step": 43
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 3.203125,
      "learning_rate": 1.9117647058823528e-05,
      "loss": 0.6781,
      "step": 44
    },
    {
      "epoch": 6.285714285714286,
      "eval_accuracy": 0.44696969696969696,
      "eval_loss": 0.7201615571975708,
      "eval_runtime": 1.543,
      "eval_samples_per_second": 85.55,
      "eval_steps_per_second": 2.592,
      "step": 44
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 6.59375,
      "learning_rate": 1.8382352941176472e-05,
      "loss": 0.6747,
      "step": 45
    },
    {
      "epoch": 6.428571428571429,
      "eval_accuracy": 0.5075757575757576,
      "eval_loss": 0.7129350304603577,
      "eval_runtime": 1.541,
      "eval_samples_per_second": 85.659,
      "eval_steps_per_second": 2.596,
      "step": 45
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 15.125,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.7016,
      "step": 46
    },
    {
      "epoch": 6.571428571428571,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.7054362297058105,
      "eval_runtime": 1.5441,
      "eval_samples_per_second": 85.488,
      "eval_steps_per_second": 2.591,
      "step": 46
    },
    {
      "epoch": 6.714285714285714,
      "grad_norm": 14.0625,
      "learning_rate": 1.6911764705882355e-05,
      "loss": 0.6824,
      "step": 47
    },
    {
      "epoch": 6.714285714285714,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.7024768590927124,
      "eval_runtime": 1.5404,
      "eval_samples_per_second": 85.69,
      "eval_steps_per_second": 2.597,
      "step": 47
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 8.625,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.6925,
      "step": 48
    },
    {
      "epoch": 6.857142857142857,
      "eval_accuracy": 0.5681818181818182,
      "eval_loss": 0.6972271203994751,
      "eval_runtime": 1.5424,
      "eval_samples_per_second": 85.581,
      "eval_steps_per_second": 2.593,
      "step": 48
    },
    {
      "epoch": 7.0,
      "grad_norm": 4.78125,
      "learning_rate": 1.5441176470588237e-05,
      "loss": 0.7089,
      "step": 49
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5909090909090909,
      "eval_loss": 0.6955847144126892,
      "eval_runtime": 1.5433,
      "eval_samples_per_second": 85.533,
      "eval_steps_per_second": 2.592,
      "step": 49
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 11.125,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 0.6984,
      "step": 50
    },
    {
      "epoch": 7.142857142857143,
      "eval_accuracy": 0.5833333333333334,
      "eval_loss": 0.6915394067764282,
      "eval_runtime": 1.5436,
      "eval_samples_per_second": 85.513,
      "eval_steps_per_second": 2.591,
      "step": 50
    },
    {
      "epoch": 7.285714285714286,
      "grad_norm": 5.8125,
      "learning_rate": 1.3970588235294118e-05,
      "loss": 0.6837,
      "step": 51
    },
    {
      "epoch": 7.285714285714286,
      "eval_accuracy": 0.5984848484848485,
      "eval_loss": 0.6907078623771667,
      "eval_runtime": 1.5416,
      "eval_samples_per_second": 85.628,
      "eval_steps_per_second": 2.595,
      "step": 51
    },
    {
      "epoch": 7.428571428571429,
      "grad_norm": 11.0,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.6988,
      "step": 52
    },
    {
      "epoch": 7.428571428571429,
      "eval_accuracy": 0.5984848484848485,
      "eval_loss": 0.6940903067588806,
      "eval_runtime": 1.5431,
      "eval_samples_per_second": 85.541,
      "eval_steps_per_second": 2.592,
      "step": 52
    },
    {
      "epoch": 7.571428571428571,
      "grad_norm": 9.375,
      "learning_rate": 1.25e-05,
      "loss": 0.6552,
      "step": 53
    },
    {
      "epoch": 7.571428571428571,
      "eval_accuracy": 0.6136363636363636,
      "eval_loss": 0.6946229934692383,
      "eval_runtime": 1.5448,
      "eval_samples_per_second": 85.448,
      "eval_steps_per_second": 2.589,
      "step": 53
    },
    {
      "epoch": 7.714285714285714,
      "grad_norm": 5.5625,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.6895,
      "step": 54
    },
    {
      "epoch": 7.714285714285714,
      "eval_accuracy": 0.5909090909090909,
      "eval_loss": 0.6917613744735718,
      "eval_runtime": 1.5421,
      "eval_samples_per_second": 85.596,
      "eval_steps_per_second": 2.594,
      "step": 54
    },
    {
      "epoch": 7.857142857142857,
      "grad_norm": 9.875,
      "learning_rate": 1.1029411764705883e-05,
      "loss": 0.7006,
      "step": 55
    },
    {
      "epoch": 7.857142857142857,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.6935369372367859,
      "eval_runtime": 1.5407,
      "eval_samples_per_second": 85.674,
      "eval_steps_per_second": 2.596,
      "step": 55
    },
    {
      "epoch": 8.0,
      "grad_norm": 11.5,
      "learning_rate": 1.0294117647058824e-05,
      "loss": 0.726,
      "step": 56
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6060606060606061,
      "eval_loss": 0.694398045539856,
      "eval_runtime": 1.5405,
      "eval_samples_per_second": 85.685,
      "eval_steps_per_second": 2.597,
      "step": 56
    },
    {
      "epoch": 8.142857142857142,
      "grad_norm": 9.0,
      "learning_rate": 9.558823529411764e-06,
      "loss": 0.7174,
      "step": 57
    },
    {
      "epoch": 8.142857142857142,
      "eval_accuracy": 0.5757575757575758,
      "eval_loss": 0.69677734375,
      "eval_runtime": 1.5413,
      "eval_samples_per_second": 85.64,
      "eval_steps_per_second": 2.595,
      "step": 57
    },
    {
      "epoch": 8.285714285714286,
      "grad_norm": 4.59375,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.66,
      "step": 58
    },
    {
      "epoch": 8.285714285714286,
      "eval_accuracy": 0.5757575757575758,
      "eval_loss": 0.6959073543548584,
      "eval_runtime": 1.5403,
      "eval_samples_per_second": 85.697,
      "eval_steps_per_second": 2.597,
      "step": 58
    },
    {
      "epoch": 8.428571428571429,
      "grad_norm": 5.125,
      "learning_rate": 8.088235294117648e-06,
      "loss": 0.6728,
      "step": 59
    },
    {
      "epoch": 8.428571428571429,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6988636255264282,
      "eval_runtime": 1.5407,
      "eval_samples_per_second": 85.675,
      "eval_steps_per_second": 2.596,
      "step": 59
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 5.40625,
      "learning_rate": 7.3529411764705884e-06,
      "loss": 0.6641,
      "step": 60
    },
    {
      "epoch": 8.571428571428571,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.6989879012107849,
      "eval_runtime": 1.5379,
      "eval_samples_per_second": 85.831,
      "eval_steps_per_second": 2.601,
      "step": 60
    },
    {
      "epoch": 8.714285714285714,
      "grad_norm": 15.8125,
      "learning_rate": 6.61764705882353e-06,
      "loss": 0.7048,
      "step": 61
    },
    {
      "epoch": 8.714285714285714,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.6997276544570923,
      "eval_runtime": 1.5397,
      "eval_samples_per_second": 85.73,
      "eval_steps_per_second": 2.598,
      "step": 61
    },
    {
      "epoch": 8.857142857142858,
      "grad_norm": 17.375,
      "learning_rate": 5.882352941176471e-06,
      "loss": 0.71,
      "step": 62
    },
    {
      "epoch": 8.857142857142858,
      "eval_accuracy": 0.5757575757575758,
      "eval_loss": 0.6996092796325684,
      "eval_runtime": 1.5434,
      "eval_samples_per_second": 85.525,
      "eval_steps_per_second": 2.592,
      "step": 62
    },
    {
      "epoch": 9.0,
      "grad_norm": 21.125,
      "learning_rate": 5.147058823529412e-06,
      "loss": 0.7085,
      "step": 63
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.553030303030303,
      "eval_loss": 0.7001480460166931,
      "eval_runtime": 1.5439,
      "eval_samples_per_second": 85.499,
      "eval_steps_per_second": 2.591,
      "step": 63
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 13.625,
      "learning_rate": 4.411764705882353e-06,
      "loss": 0.6871,
      "step": 64
    },
    {
      "epoch": 9.142857142857142,
      "eval_accuracy": 0.5151515151515151,
      "eval_loss": 0.7034357190132141,
      "eval_runtime": 1.5419,
      "eval_samples_per_second": 85.609,
      "eval_steps_per_second": 2.594,
      "step": 64
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 8.6875,
      "learning_rate": 3.6764705882352942e-06,
      "loss": 0.7011,
      "step": 65
    },
    {
      "epoch": 9.285714285714286,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.702935516834259,
      "eval_runtime": 1.5408,
      "eval_samples_per_second": 85.67,
      "eval_steps_per_second": 2.596,
      "step": 65
    },
    {
      "epoch": 9.428571428571429,
      "grad_norm": 1.546875,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 0.6376,
      "step": 66
    },
    {
      "epoch": 9.428571428571429,
      "eval_accuracy": 0.5303030303030303,
      "eval_loss": 0.7021484375,
      "eval_runtime": 1.5437,
      "eval_samples_per_second": 85.509,
      "eval_steps_per_second": 2.591,
      "step": 66
    },
    {
      "epoch": 9.571428571428571,
      "grad_norm": 4.0,
      "learning_rate": 2.2058823529411767e-06,
      "loss": 0.6903,
      "step": 67
    },
    {
      "epoch": 9.571428571428571,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.7026574015617371,
      "eval_runtime": 1.5418,
      "eval_samples_per_second": 85.612,
      "eval_steps_per_second": 2.594,
      "step": 67
    },
    {
      "epoch": 9.714285714285714,
      "grad_norm": 8.1875,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 0.6794,
      "step": 68
    },
    {
      "epoch": 9.714285714285714,
      "eval_accuracy": 0.5454545454545454,
      "eval_loss": 0.7024709582328796,
      "eval_runtime": 1.5407,
      "eval_samples_per_second": 85.675,
      "eval_steps_per_second": 2.596,
      "step": 68
    },
    {
      "epoch": 9.857142857142858,
      "grad_norm": 6.15625,
      "learning_rate": 7.352941176470589e-07,
      "loss": 0.6925,
      "step": 69
    },
    {
      "epoch": 9.857142857142858,
      "eval_accuracy": 0.5227272727272727,
      "eval_loss": 0.702207624912262,
      "eval_runtime": 1.6313,
      "eval_samples_per_second": 80.919,
      "eval_steps_per_second": 2.452,
      "step": 69
    },
    {
      "epoch": 10.0,
      "grad_norm": 11.375,
      "learning_rate": 0.0,
      "loss": 0.6948,
      "step": 70
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.5378787878787878,
      "eval_loss": 0.7023614645004272,
      "eval_runtime": 1.6551,
      "eval_samples_per_second": 79.753,
      "eval_steps_per_second": 2.417,
      "step": 70
    },
    {
      "epoch": 10.0,
      "step": 70,
      "total_flos": 2.236538086961971e+16,
      "train_loss": 0.7161136295114244,
      "train_runtime": 300.8345,
      "train_samples_per_second": 17.485,
      "train_steps_per_second": 0.233
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.236538086961971e+16,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}