{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.984,
  "eval_steps": 1,
  "global_step": 124,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
{ | |
"epoch": 0.016, | |
"grad_norm": 19.760618209838867, | |
"learning_rate": 2.5e-05, | |
"loss": 1.0115, | |
"step": 1 | |
}, | |
{ | |
"epoch": 0.016, | |
"eval_accuracy": 0.388, | |
"eval_loss": 1.2842470407485962, | |
"eval_runtime": 7.054, | |
"eval_samples_per_second": 35.441, | |
"eval_steps_per_second": 4.536, | |
"step": 1 | |
}, | |
{ | |
"epoch": 0.032, | |
"grad_norm": 26.019010543823242, | |
"learning_rate": 5e-05, | |
"loss": 1.1792, | |
"step": 2 | |
}, | |
{ | |
"epoch": 0.032, | |
"eval_accuracy": 0.388, | |
"eval_loss": 1.2652573585510254, | |
"eval_runtime": 7.0551, | |
"eval_samples_per_second": 35.435, | |
"eval_steps_per_second": 4.536, | |
"step": 2 | |
}, | |
{ | |
"epoch": 0.048, | |
"grad_norm": 18.010499954223633, | |
"learning_rate": 4.959016393442623e-05, | |
"loss": 0.9077, | |
"step": 3 | |
}, | |
{ | |
"epoch": 0.048, | |
"eval_accuracy": 0.396, | |
"eval_loss": 1.1932929754257202, | |
"eval_runtime": 6.9922, | |
"eval_samples_per_second": 35.754, | |
"eval_steps_per_second": 4.577, | |
"step": 3 | |
}, | |
{ | |
"epoch": 0.064, | |
"grad_norm": 35.78538131713867, | |
"learning_rate": 4.918032786885246e-05, | |
"loss": 1.2982, | |
"step": 4 | |
}, | |
{ | |
"epoch": 0.064, | |
"eval_accuracy": 0.392, | |
"eval_loss": 1.1161845922470093, | |
"eval_runtime": 6.9902, | |
"eval_samples_per_second": 35.764, | |
"eval_steps_per_second": 4.578, | |
"step": 4 | |
}, | |
{ | |
"epoch": 0.08, | |
"grad_norm": 18.530590057373047, | |
"learning_rate": 4.8770491803278687e-05, | |
"loss": 1.0213, | |
"step": 5 | |
}, | |
{ | |
"epoch": 0.08, | |
"eval_accuracy": 0.396, | |
"eval_loss": 1.0403300523757935, | |
"eval_runtime": 7.052, | |
"eval_samples_per_second": 35.451, | |
"eval_steps_per_second": 4.538, | |
"step": 5 | |
}, | |
{ | |
"epoch": 0.096, | |
"grad_norm": 31.981719970703125, | |
"learning_rate": 4.836065573770492e-05, | |
"loss": 1.0673, | |
"step": 6 | |
}, | |
{ | |
"epoch": 0.096, | |
"eval_accuracy": 0.412, | |
"eval_loss": 0.9646367430686951, | |
"eval_runtime": 7.0018, | |
"eval_samples_per_second": 35.705, | |
"eval_steps_per_second": 4.57, | |
"step": 6 | |
}, | |
{ | |
"epoch": 0.112, | |
"grad_norm": 17.69211196899414, | |
"learning_rate": 4.795081967213115e-05, | |
"loss": 0.8041, | |
"step": 7 | |
}, | |
{ | |
"epoch": 0.112, | |
"eval_accuracy": 0.448, | |
"eval_loss": 0.8947343826293945, | |
"eval_runtime": 7.0526, | |
"eval_samples_per_second": 35.448, | |
"eval_steps_per_second": 4.537, | |
"step": 7 | |
}, | |
{ | |
"epoch": 0.128, | |
"grad_norm": 21.216232299804688, | |
"learning_rate": 4.754098360655738e-05, | |
"loss": 0.8364, | |
"step": 8 | |
}, | |
{ | |
"epoch": 0.128, | |
"eval_accuracy": 0.468, | |
"eval_loss": 0.8569296598434448, | |
"eval_runtime": 7.0532, | |
"eval_samples_per_second": 35.445, | |
"eval_steps_per_second": 4.537, | |
"step": 8 | |
}, | |
{ | |
"epoch": 0.144, | |
"grad_norm": 28.313173294067383, | |
"learning_rate": 4.713114754098361e-05, | |
"loss": 0.9756, | |
"step": 9 | |
}, | |
{ | |
"epoch": 0.144, | |
"eval_accuracy": 0.468, | |
"eval_loss": 0.8217734098434448, | |
"eval_runtime": 7.0563, | |
"eval_samples_per_second": 35.429, | |
"eval_steps_per_second": 4.535, | |
"step": 9 | |
}, | |
{ | |
"epoch": 0.16, | |
"grad_norm": 20.08592414855957, | |
"learning_rate": 4.672131147540984e-05, | |
"loss": 0.7215, | |
"step": 10 | |
}, | |
{ | |
"epoch": 0.16, | |
"eval_accuracy": 0.476, | |
"eval_loss": 0.7888495922088623, | |
"eval_runtime": 7.0604, | |
"eval_samples_per_second": 35.409, | |
"eval_steps_per_second": 4.532, | |
"step": 10 | |
}, | |
{ | |
"epoch": 0.176, | |
"grad_norm": 22.27912712097168, | |
"learning_rate": 4.631147540983607e-05, | |
"loss": 0.9865, | |
"step": 11 | |
}, | |
{ | |
"epoch": 0.176, | |
"eval_accuracy": 0.492, | |
"eval_loss": 0.7644336223602295, | |
"eval_runtime": 7.0442, | |
"eval_samples_per_second": 35.49, | |
"eval_steps_per_second": 4.543, | |
"step": 11 | |
}, | |
{ | |
"epoch": 0.192, | |
"grad_norm": 9.316252708435059, | |
"learning_rate": 4.59016393442623e-05, | |
"loss": 0.8134, | |
"step": 12 | |
}, | |
{ | |
"epoch": 0.192, | |
"eval_accuracy": 0.492, | |
"eval_loss": 0.7514082193374634, | |
"eval_runtime": 7.0626, | |
"eval_samples_per_second": 35.398, | |
"eval_steps_per_second": 4.531, | |
"step": 12 | |
}, | |
{ | |
"epoch": 0.208, | |
"grad_norm": 22.068655014038086, | |
"learning_rate": 4.549180327868853e-05, | |
"loss": 1.0133, | |
"step": 13 | |
}, | |
{ | |
"epoch": 0.208, | |
"eval_accuracy": 0.504, | |
"eval_loss": 0.743643581867218, | |
"eval_runtime": 7.0467, | |
"eval_samples_per_second": 35.478, | |
"eval_steps_per_second": 4.541, | |
"step": 13 | |
}, | |
{ | |
"epoch": 0.224, | |
"grad_norm": 13.667263984680176, | |
"learning_rate": 4.508196721311476e-05, | |
"loss": 0.6027, | |
"step": 14 | |
}, | |
{ | |
"epoch": 0.224, | |
"eval_accuracy": 0.564, | |
"eval_loss": 0.7456718683242798, | |
"eval_runtime": 7.0525, | |
"eval_samples_per_second": 35.448, | |
"eval_steps_per_second": 4.537, | |
"step": 14 | |
}, | |
{ | |
"epoch": 0.24, | |
"grad_norm": 8.404646873474121, | |
"learning_rate": 4.467213114754098e-05, | |
"loss": 0.6702, | |
"step": 15 | |
}, | |
{ | |
"epoch": 0.24, | |
"eval_accuracy": 0.632, | |
"eval_loss": 0.7482665777206421, | |
"eval_runtime": 7.0041, | |
"eval_samples_per_second": 35.694, | |
"eval_steps_per_second": 4.569, | |
"step": 15 | |
}, | |
{ | |
"epoch": 0.256, | |
"grad_norm": 10.424461364746094, | |
"learning_rate": 4.426229508196721e-05, | |
"loss": 0.5514, | |
"step": 16 | |
}, | |
{ | |
"epoch": 0.256, | |
"eval_accuracy": 0.612, | |
"eval_loss": 0.7656826376914978, | |
"eval_runtime": 7.0609, | |
"eval_samples_per_second": 35.406, | |
"eval_steps_per_second": 4.532, | |
"step": 16 | |
}, | |
{ | |
"epoch": 0.272, | |
"grad_norm": 4.860620498657227, | |
"learning_rate": 4.3852459016393444e-05, | |
"loss": 0.4876, | |
"step": 17 | |
}, | |
{ | |
"epoch": 0.272, | |
"eval_accuracy": 0.612, | |
"eval_loss": 0.7941005825996399, | |
"eval_runtime": 7.0565, | |
"eval_samples_per_second": 35.428, | |
"eval_steps_per_second": 4.535, | |
"step": 17 | |
}, | |
{ | |
"epoch": 0.288, | |
"grad_norm": 12.54118824005127, | |
"learning_rate": 4.3442622950819674e-05, | |
"loss": 0.8596, | |
"step": 18 | |
}, | |
{ | |
"epoch": 0.288, | |
"eval_accuracy": 0.612, | |
"eval_loss": 0.8038740158081055, | |
"eval_runtime": 7.0427, | |
"eval_samples_per_second": 35.498, | |
"eval_steps_per_second": 4.544, | |
"step": 18 | |
}, | |
{ | |
"epoch": 0.304, | |
"grad_norm": 17.676427841186523, | |
"learning_rate": 4.3032786885245904e-05, | |
"loss": 0.8936, | |
"step": 19 | |
}, | |
{ | |
"epoch": 0.304, | |
"eval_accuracy": 0.612, | |
"eval_loss": 0.7973695993423462, | |
"eval_runtime": 7.0497, | |
"eval_samples_per_second": 35.463, | |
"eval_steps_per_second": 4.539, | |
"step": 19 | |
}, | |
{ | |
"epoch": 0.32, | |
"grad_norm": 26.01458740234375, | |
"learning_rate": 4.262295081967213e-05, | |
"loss": 1.1065, | |
"step": 20 | |
}, | |
{ | |
"epoch": 0.32, | |
"eval_accuracy": 0.612, | |
"eval_loss": 0.7785317301750183, | |
"eval_runtime": 7.0561, | |
"eval_samples_per_second": 35.43, | |
"eval_steps_per_second": 4.535, | |
"step": 20 | |
}, | |
{ | |
"epoch": 0.336, | |
"grad_norm": 14.344263076782227, | |
"learning_rate": 4.2213114754098365e-05, | |
"loss": 0.7305, | |
"step": 21 | |
}, | |
{ | |
"epoch": 0.336, | |
"eval_accuracy": 0.608, | |
"eval_loss": 0.7563486099243164, | |
"eval_runtime": 7.0512, | |
"eval_samples_per_second": 35.455, | |
"eval_steps_per_second": 4.538, | |
"step": 21 | |
}, | |
{ | |
"epoch": 0.352, | |
"grad_norm": 9.17346477508545, | |
"learning_rate": 4.1803278688524595e-05, | |
"loss": 0.5736, | |
"step": 22 | |
}, | |
{ | |
"epoch": 0.352, | |
"eval_accuracy": 0.608, | |
"eval_loss": 0.7412070035934448, | |
"eval_runtime": 7.0471, | |
"eval_samples_per_second": 35.476, | |
"eval_steps_per_second": 4.541, | |
"step": 22 | |
}, | |
{ | |
"epoch": 0.368, | |
"grad_norm": 12.143023490905762, | |
"learning_rate": 4.1393442622950826e-05, | |
"loss": 0.589, | |
"step": 23 | |
}, | |
{ | |
"epoch": 0.368, | |
"eval_accuracy": 0.608, | |
"eval_loss": 0.7287958860397339, | |
"eval_runtime": 7.0441, | |
"eval_samples_per_second": 35.491, | |
"eval_steps_per_second": 4.543, | |
"step": 23 | |
}, | |
{ | |
"epoch": 0.384, | |
"grad_norm": 18.4329891204834, | |
"learning_rate": 4.098360655737705e-05, | |
"loss": 0.9443, | |
"step": 24 | |
}, | |
{ | |
"epoch": 0.384, | |
"eval_accuracy": 0.596, | |
"eval_loss": 0.714157223701477, | |
"eval_runtime": 6.9925, | |
"eval_samples_per_second": 35.753, | |
"eval_steps_per_second": 4.576, | |
"step": 24 | |
}, | |
{ | |
"epoch": 0.4, | |
"grad_norm": 17.655550003051758, | |
"learning_rate": 4.057377049180328e-05, | |
"loss": 0.6071, | |
"step": 25 | |
}, | |
{ | |
"epoch": 0.4, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.7043437361717224, | |
"eval_runtime": 7.0498, | |
"eval_samples_per_second": 35.462, | |
"eval_steps_per_second": 4.539, | |
"step": 25 | |
}, | |
{ | |
"epoch": 0.416, | |
"grad_norm": 5.435880184173584, | |
"learning_rate": 4.016393442622951e-05, | |
"loss": 0.5661, | |
"step": 26 | |
}, | |
{ | |
"epoch": 0.416, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6993535161018372, | |
"eval_runtime": 6.9976, | |
"eval_samples_per_second": 35.726, | |
"eval_steps_per_second": 4.573, | |
"step": 26 | |
}, | |
{ | |
"epoch": 0.432, | |
"grad_norm": 7.0688395500183105, | |
"learning_rate": 3.975409836065574e-05, | |
"loss": 0.6571, | |
"step": 27 | |
}, | |
{ | |
"epoch": 0.432, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6944531202316284, | |
"eval_runtime": 7.0588, | |
"eval_samples_per_second": 35.417, | |
"eval_steps_per_second": 4.533, | |
"step": 27 | |
}, | |
{ | |
"epoch": 0.448, | |
"grad_norm": 4.792002201080322, | |
"learning_rate": 3.934426229508197e-05, | |
"loss": 0.7296, | |
"step": 28 | |
}, | |
{ | |
"epoch": 0.448, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6921367049217224, | |
"eval_runtime": 6.9708, | |
"eval_samples_per_second": 35.864, | |
"eval_steps_per_second": 4.591, | |
"step": 28 | |
}, | |
{ | |
"epoch": 0.464, | |
"grad_norm": 8.166378021240234, | |
"learning_rate": 3.89344262295082e-05, | |
"loss": 0.6252, | |
"step": 29 | |
}, | |
{ | |
"epoch": 0.464, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6932871341705322, | |
"eval_runtime": 7.0129, | |
"eval_samples_per_second": 35.649, | |
"eval_steps_per_second": 4.563, | |
"step": 29 | |
}, | |
{ | |
"epoch": 0.48, | |
"grad_norm": 13.18557071685791, | |
"learning_rate": 3.8524590163934424e-05, | |
"loss": 0.7621, | |
"step": 30 | |
}, | |
{ | |
"epoch": 0.48, | |
"eval_accuracy": 0.564, | |
"eval_loss": 0.6983827948570251, | |
"eval_runtime": 7.0198, | |
"eval_samples_per_second": 35.613, | |
"eval_steps_per_second": 4.559, | |
"step": 30 | |
}, | |
{ | |
"epoch": 0.496, | |
"grad_norm": 5.377532482147217, | |
"learning_rate": 3.8114754098360655e-05, | |
"loss": 0.6777, | |
"step": 31 | |
}, | |
{ | |
"epoch": 0.496, | |
"eval_accuracy": 0.564, | |
"eval_loss": 0.703988254070282, | |
"eval_runtime": 7.0118, | |
"eval_samples_per_second": 35.654, | |
"eval_steps_per_second": 4.564, | |
"step": 31 | |
}, | |
{ | |
"epoch": 0.512, | |
"grad_norm": 9.753649711608887, | |
"learning_rate": 3.7704918032786885e-05, | |
"loss": 0.6802, | |
"step": 32 | |
}, | |
{ | |
"epoch": 0.512, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.707226574420929, | |
"eval_runtime": 7.0122, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.563, | |
"step": 32 | |
}, | |
{ | |
"epoch": 0.528, | |
"grad_norm": 4.363128662109375, | |
"learning_rate": 3.729508196721312e-05, | |
"loss": 0.4794, | |
"step": 33 | |
}, | |
{ | |
"epoch": 0.528, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.7095351815223694, | |
"eval_runtime": 6.9626, | |
"eval_samples_per_second": 35.906, | |
"eval_steps_per_second": 4.596, | |
"step": 33 | |
}, | |
{ | |
"epoch": 0.544, | |
"grad_norm": 9.72979736328125, | |
"learning_rate": 3.6885245901639346e-05, | |
"loss": 0.5588, | |
"step": 34 | |
}, | |
{ | |
"epoch": 0.544, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.709597647190094, | |
"eval_runtime": 7.0132, | |
"eval_samples_per_second": 35.647, | |
"eval_steps_per_second": 4.563, | |
"step": 34 | |
}, | |
{ | |
"epoch": 0.56, | |
"grad_norm": 8.723620414733887, | |
"learning_rate": 3.6475409836065576e-05, | |
"loss": 0.7186, | |
"step": 35 | |
}, | |
{ | |
"epoch": 0.56, | |
"eval_accuracy": 0.572, | |
"eval_loss": 0.7086992263793945, | |
"eval_runtime": 7.015, | |
"eval_samples_per_second": 35.638, | |
"eval_steps_per_second": 4.562, | |
"step": 35 | |
}, | |
{ | |
"epoch": 0.576, | |
"grad_norm": 8.50931453704834, | |
"learning_rate": 3.6065573770491806e-05, | |
"loss": 0.8171, | |
"step": 36 | |
}, | |
{ | |
"epoch": 0.576, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.7101796865463257, | |
"eval_runtime": 7.0197, | |
"eval_samples_per_second": 35.614, | |
"eval_steps_per_second": 4.559, | |
"step": 36 | |
}, | |
{ | |
"epoch": 0.592, | |
"grad_norm": 7.431636333465576, | |
"learning_rate": 3.5655737704918037e-05, | |
"loss": 0.828, | |
"step": 37 | |
}, | |
{ | |
"epoch": 0.592, | |
"eval_accuracy": 0.568, | |
"eval_loss": 0.7123827934265137, | |
"eval_runtime": 7.013, | |
"eval_samples_per_second": 35.648, | |
"eval_steps_per_second": 4.563, | |
"step": 37 | |
}, | |
{ | |
"epoch": 0.608, | |
"grad_norm": 14.679248809814453, | |
"learning_rate": 3.524590163934427e-05, | |
"loss": 0.7014, | |
"step": 38 | |
}, | |
{ | |
"epoch": 0.608, | |
"eval_accuracy": 0.568, | |
"eval_loss": 0.7120000123977661, | |
"eval_runtime": 7.0123, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.563, | |
"step": 38 | |
}, | |
{ | |
"epoch": 0.624, | |
"grad_norm": 22.605974197387695, | |
"learning_rate": 3.483606557377049e-05, | |
"loss": 0.8848, | |
"step": 39 | |
}, | |
{ | |
"epoch": 0.624, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.7097578048706055, | |
"eval_runtime": 7.0118, | |
"eval_samples_per_second": 35.654, | |
"eval_steps_per_second": 4.564, | |
"step": 39 | |
}, | |
{ | |
"epoch": 0.64, | |
"grad_norm": 10.227527618408203, | |
"learning_rate": 3.442622950819672e-05, | |
"loss": 0.7408, | |
"step": 40 | |
}, | |
{ | |
"epoch": 0.64, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.7055820226669312, | |
"eval_runtime": 7.0124, | |
"eval_samples_per_second": 35.651, | |
"eval_steps_per_second": 4.563, | |
"step": 40 | |
}, | |
{ | |
"epoch": 0.656, | |
"grad_norm": 14.50621223449707, | |
"learning_rate": 3.401639344262295e-05, | |
"loss": 0.741, | |
"step": 41 | |
}, | |
{ | |
"epoch": 0.656, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.7033867239952087, | |
"eval_runtime": 7.0125, | |
"eval_samples_per_second": 35.651, | |
"eval_steps_per_second": 4.563, | |
"step": 41 | |
}, | |
{ | |
"epoch": 0.672, | |
"grad_norm": 17.723159790039062, | |
"learning_rate": 3.360655737704918e-05, | |
"loss": 0.7386, | |
"step": 42 | |
}, | |
{ | |
"epoch": 0.672, | |
"eval_accuracy": 0.564, | |
"eval_loss": 0.7003242373466492, | |
"eval_runtime": 7.0141, | |
"eval_samples_per_second": 35.643, | |
"eval_steps_per_second": 4.562, | |
"step": 42 | |
}, | |
{ | |
"epoch": 0.688, | |
"grad_norm": 9.643174171447754, | |
"learning_rate": 3.319672131147541e-05, | |
"loss": 0.806, | |
"step": 43 | |
}, | |
{ | |
"epoch": 0.688, | |
"eval_accuracy": 0.56, | |
"eval_loss": 0.6969140768051147, | |
"eval_runtime": 7.0114, | |
"eval_samples_per_second": 35.656, | |
"eval_steps_per_second": 4.564, | |
"step": 43 | |
}, | |
{ | |
"epoch": 0.704, | |
"grad_norm": 8.351974487304688, | |
"learning_rate": 3.2786885245901635e-05, | |
"loss": 0.6752, | |
"step": 44 | |
}, | |
{ | |
"epoch": 0.704, | |
"eval_accuracy": 0.568, | |
"eval_loss": 0.6954609155654907, | |
"eval_runtime": 7.0193, | |
"eval_samples_per_second": 35.616, | |
"eval_steps_per_second": 4.559, | |
"step": 44 | |
}, | |
{ | |
"epoch": 0.72, | |
"grad_norm": 11.415303230285645, | |
"learning_rate": 3.237704918032787e-05, | |
"loss": 0.6256, | |
"step": 45 | |
}, | |
{ | |
"epoch": 0.72, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6936054825782776, | |
"eval_runtime": 6.9694, | |
"eval_samples_per_second": 35.871, | |
"eval_steps_per_second": 4.592, | |
"step": 45 | |
}, | |
{ | |
"epoch": 0.736, | |
"grad_norm": 6.098294734954834, | |
"learning_rate": 3.19672131147541e-05, | |
"loss": 0.6824, | |
"step": 46 | |
}, | |
{ | |
"epoch": 0.736, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6936074495315552, | |
"eval_runtime": 6.9629, | |
"eval_samples_per_second": 35.905, | |
"eval_steps_per_second": 4.596, | |
"step": 46 | |
}, | |
{ | |
"epoch": 0.752, | |
"grad_norm": 10.126526832580566, | |
"learning_rate": 3.155737704918033e-05, | |
"loss": 0.665, | |
"step": 47 | |
}, | |
{ | |
"epoch": 0.752, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6951718926429749, | |
"eval_runtime": 6.9629, | |
"eval_samples_per_second": 35.905, | |
"eval_steps_per_second": 4.596, | |
"step": 47 | |
}, | |
{ | |
"epoch": 0.768, | |
"grad_norm": 5.928945064544678, | |
"learning_rate": 3.114754098360656e-05, | |
"loss": 0.7557, | |
"step": 48 | |
}, | |
{ | |
"epoch": 0.768, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6969941258430481, | |
"eval_runtime": 7.0149, | |
"eval_samples_per_second": 35.639, | |
"eval_steps_per_second": 4.562, | |
"step": 48 | |
}, | |
{ | |
"epoch": 0.784, | |
"grad_norm": 10.933755874633789, | |
"learning_rate": 3.073770491803279e-05, | |
"loss": 0.6899, | |
"step": 49 | |
}, | |
{ | |
"epoch": 0.784, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6987070441246033, | |
"eval_runtime": 7.0121, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.564, | |
"step": 49 | |
}, | |
{ | |
"epoch": 0.8, | |
"grad_norm": 12.806472778320312, | |
"learning_rate": 3.0327868852459017e-05, | |
"loss": 0.7015, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.8, | |
"eval_accuracy": 0.596, | |
"eval_loss": 0.7015273571014404, | |
"eval_runtime": 7.0147, | |
"eval_samples_per_second": 35.639, | |
"eval_steps_per_second": 4.562, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.816, | |
"grad_norm": 6.302614688873291, | |
"learning_rate": 2.9918032786885248e-05, | |
"loss": 0.6381, | |
"step": 51 | |
}, | |
{ | |
"epoch": 0.816, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.7052890658378601, | |
"eval_runtime": 7.0149, | |
"eval_samples_per_second": 35.638, | |
"eval_steps_per_second": 4.562, | |
"step": 51 | |
}, | |
{ | |
"epoch": 0.832, | |
"grad_norm": 16.82048225402832, | |
"learning_rate": 2.9508196721311478e-05, | |
"loss": 0.7628, | |
"step": 52 | |
}, | |
{ | |
"epoch": 0.832, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.7072167992591858, | |
"eval_runtime": 7.0181, | |
"eval_samples_per_second": 35.622, | |
"eval_steps_per_second": 4.56, | |
"step": 52 | |
}, | |
{ | |
"epoch": 0.848, | |
"grad_norm": 13.512633323669434, | |
"learning_rate": 2.9098360655737705e-05, | |
"loss": 0.6501, | |
"step": 53 | |
}, | |
{ | |
"epoch": 0.848, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.7064375281333923, | |
"eval_runtime": 7.0167, | |
"eval_samples_per_second": 35.629, | |
"eval_steps_per_second": 4.561, | |
"step": 53 | |
}, | |
{ | |
"epoch": 0.864, | |
"grad_norm": 13.333196640014648, | |
"learning_rate": 2.8688524590163935e-05, | |
"loss": 0.6324, | |
"step": 54 | |
}, | |
{ | |
"epoch": 0.864, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.7091484665870667, | |
"eval_runtime": 7.0246, | |
"eval_samples_per_second": 35.589, | |
"eval_steps_per_second": 4.555, | |
"step": 54 | |
}, | |
{ | |
"epoch": 0.88, | |
"grad_norm": 18.334705352783203, | |
"learning_rate": 2.8278688524590162e-05, | |
"loss": 0.8383, | |
"step": 55 | |
}, | |
{ | |
"epoch": 0.88, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.7081835865974426, | |
"eval_runtime": 7.0146, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 55 | |
}, | |
{ | |
"epoch": 0.896, | |
"grad_norm": 18.812061309814453, | |
"learning_rate": 2.7868852459016392e-05, | |
"loss": 0.8253, | |
"step": 56 | |
}, | |
{ | |
"epoch": 0.896, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.7055683732032776, | |
"eval_runtime": 7.0156, | |
"eval_samples_per_second": 35.635, | |
"eval_steps_per_second": 4.561, | |
"step": 56 | |
}, | |
{ | |
"epoch": 0.912, | |
"grad_norm": 17.45163345336914, | |
"learning_rate": 2.7459016393442626e-05, | |
"loss": 0.8982, | |
"step": 57 | |
}, | |
{ | |
"epoch": 0.912, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.7019336223602295, | |
"eval_runtime": 7.0165, | |
"eval_samples_per_second": 35.63, | |
"eval_steps_per_second": 4.561, | |
"step": 57 | |
}, | |
{ | |
"epoch": 0.928, | |
"grad_norm": 9.255583763122559, | |
"learning_rate": 2.7049180327868856e-05, | |
"loss": 0.8069, | |
"step": 58 | |
}, | |
{ | |
"epoch": 0.928, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6979863047599792, | |
"eval_runtime": 7.0275, | |
"eval_samples_per_second": 35.574, | |
"eval_steps_per_second": 4.554, | |
"step": 58 | |
}, | |
{ | |
"epoch": 0.944, | |
"grad_norm": 6.064142227172852, | |
"learning_rate": 2.6639344262295087e-05, | |
"loss": 0.4618, | |
"step": 59 | |
}, | |
{ | |
"epoch": 0.944, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6955175995826721, | |
"eval_runtime": 7.0188, | |
"eval_samples_per_second": 35.618, | |
"eval_steps_per_second": 4.559, | |
"step": 59 | |
}, | |
{ | |
"epoch": 0.96, | |
"grad_norm": 9.563672065734863, | |
"learning_rate": 2.6229508196721314e-05, | |
"loss": 0.7786, | |
"step": 60 | |
}, | |
{ | |
"epoch": 0.96, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6935468912124634, | |
"eval_runtime": 7.0191, | |
"eval_samples_per_second": 35.617, | |
"eval_steps_per_second": 4.559, | |
"step": 60 | |
}, | |
{ | |
"epoch": 0.976, | |
"grad_norm": 4.209216594696045, | |
"learning_rate": 2.5819672131147544e-05, | |
"loss": 0.5239, | |
"step": 61 | |
}, | |
{ | |
"epoch": 0.976, | |
"eval_accuracy": 0.604, | |
"eval_loss": 0.6912910342216492, | |
"eval_runtime": 7.0138, | |
"eval_samples_per_second": 35.644, | |
"eval_steps_per_second": 4.562, | |
"step": 61 | |
}, | |
{ | |
"epoch": 0.992, | |
"grad_norm": 4.372480392456055, | |
"learning_rate": 2.540983606557377e-05, | |
"loss": 0.578, | |
"step": 62 | |
}, | |
{ | |
"epoch": 0.992, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6903886795043945, | |
"eval_runtime": 7.0123, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.563, | |
"step": 62 | |
}, | |
{ | |
"epoch": 1.008, | |
"grad_norm": 4.641872882843018, | |
"learning_rate": 2.5e-05, | |
"loss": 0.6415, | |
"step": 63 | |
}, | |
{ | |
"epoch": 1.008, | |
"eval_accuracy": 0.6, | |
"eval_loss": 0.6886054873466492, | |
"eval_runtime": 7.0156, | |
"eval_samples_per_second": 35.635, | |
"eval_steps_per_second": 4.561, | |
"step": 63 | |
}, | |
{ | |
"epoch": 1.024, | |
"grad_norm": 9.991393089294434, | |
"learning_rate": 2.459016393442623e-05, | |
"loss": 0.624, | |
"step": 64 | |
}, | |
{ | |
"epoch": 1.024, | |
"eval_accuracy": 0.596, | |
"eval_loss": 0.6881250143051147, | |
"eval_runtime": 7.0155, | |
"eval_samples_per_second": 35.636, | |
"eval_steps_per_second": 4.561, | |
"step": 64 | |
}, | |
{ | |
"epoch": 1.04, | |
"grad_norm": 5.3148980140686035, | |
"learning_rate": 2.418032786885246e-05, | |
"loss": 0.7223, | |
"step": 65 | |
}, | |
{ | |
"epoch": 1.04, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6887734532356262, | |
"eval_runtime": 7.0118, | |
"eval_samples_per_second": 35.654, | |
"eval_steps_per_second": 4.564, | |
"step": 65 | |
}, | |
{ | |
"epoch": 1.056, | |
"grad_norm": 9.664165496826172, | |
"learning_rate": 2.377049180327869e-05, | |
"loss": 0.8397, | |
"step": 66 | |
}, | |
{ | |
"epoch": 1.056, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.688064455986023, | |
"eval_runtime": 7.0139, | |
"eval_samples_per_second": 35.643, | |
"eval_steps_per_second": 4.562, | |
"step": 66 | |
}, | |
{ | |
"epoch": 1.072, | |
"grad_norm": 12.799510955810547, | |
"learning_rate": 2.336065573770492e-05, | |
"loss": 0.6944, | |
"step": 67 | |
}, | |
{ | |
"epoch": 1.072, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6884999871253967, | |
"eval_runtime": 7.0162, | |
"eval_samples_per_second": 35.632, | |
"eval_steps_per_second": 4.561, | |
"step": 67 | |
}, | |
{ | |
"epoch": 1.088, | |
"grad_norm": 13.912579536437988, | |
"learning_rate": 2.295081967213115e-05, | |
"loss": 0.8555, | |
"step": 68 | |
}, | |
{ | |
"epoch": 1.088, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6879472732543945, | |
"eval_runtime": 7.0157, | |
"eval_samples_per_second": 35.635, | |
"eval_steps_per_second": 4.561, | |
"step": 68 | |
}, | |
{ | |
"epoch": 1.104, | |
"grad_norm": 5.277914047241211, | |
"learning_rate": 2.254098360655738e-05, | |
"loss": 0.6251, | |
"step": 69 | |
}, | |
{ | |
"epoch": 1.104, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6889882683753967, | |
"eval_runtime": 7.0131, | |
"eval_samples_per_second": 35.647, | |
"eval_steps_per_second": 4.563, | |
"step": 69 | |
}, | |
{ | |
"epoch": 1.12, | |
"grad_norm": 8.122929573059082, | |
"learning_rate": 2.2131147540983607e-05, | |
"loss": 0.6682, | |
"step": 70 | |
}, | |
{ | |
"epoch": 1.12, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.689697265625, | |
"eval_runtime": 7.0187, | |
"eval_samples_per_second": 35.619, | |
"eval_steps_per_second": 4.559, | |
"step": 70 | |
}, | |
{ | |
"epoch": 1.1360000000000001, | |
"grad_norm": 10.972965240478516, | |
"learning_rate": 2.1721311475409837e-05, | |
"loss": 0.6869, | |
"step": 71 | |
}, | |
{ | |
"epoch": 1.1360000000000001, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6901894807815552, | |
"eval_runtime": 7.0124, | |
"eval_samples_per_second": 35.651, | |
"eval_steps_per_second": 4.563, | |
"step": 71 | |
}, | |
{ | |
"epoch": 1.152, | |
"grad_norm": 8.138443946838379, | |
"learning_rate": 2.1311475409836064e-05, | |
"loss": 0.4818, | |
"step": 72 | |
}, | |
{ | |
"epoch": 1.152, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6901718974113464, | |
"eval_runtime": 7.0104, | |
"eval_samples_per_second": 35.661, | |
"eval_steps_per_second": 4.565, | |
"step": 72 | |
}, | |
{ | |
"epoch": 1.168, | |
"grad_norm": 12.203225135803223, | |
"learning_rate": 2.0901639344262298e-05, | |
"loss": 0.7419, | |
"step": 73 | |
}, | |
{ | |
"epoch": 1.168, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6908398270606995, | |
"eval_runtime": 7.0172, | |
"eval_samples_per_second": 35.627, | |
"eval_steps_per_second": 4.56, | |
"step": 73 | |
}, | |
{ | |
"epoch": 1.184, | |
"grad_norm": 8.825016975402832, | |
"learning_rate": 2.0491803278688525e-05, | |
"loss": 0.8366, | |
"step": 74 | |
}, | |
{ | |
"epoch": 1.184, | |
"eval_accuracy": 0.576, | |
"eval_loss": 0.689871072769165, | |
"eval_runtime": 7.0114, | |
"eval_samples_per_second": 35.656, | |
"eval_steps_per_second": 4.564, | |
"step": 74 | |
}, | |
{ | |
"epoch": 1.2, | |
"grad_norm": 8.380965232849121, | |
"learning_rate": 2.0081967213114755e-05, | |
"loss": 0.728, | |
"step": 75 | |
}, | |
{ | |
"epoch": 1.2, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6898242235183716, | |
"eval_runtime": 7.0129, | |
"eval_samples_per_second": 35.648, | |
"eval_steps_per_second": 4.563, | |
"step": 75 | |
}, | |
{ | |
"epoch": 1.216, | |
"grad_norm": 13.098135948181152, | |
"learning_rate": 1.9672131147540985e-05, | |
"loss": 0.722, | |
"step": 76 | |
}, | |
{ | |
"epoch": 1.216, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6897304654121399, | |
"eval_runtime": 7.0172, | |
"eval_samples_per_second": 35.627, | |
"eval_steps_per_second": 4.56, | |
"step": 76 | |
}, | |
{ | |
"epoch": 1.232, | |
"grad_norm": 7.279478549957275, | |
"learning_rate": 1.9262295081967212e-05, | |
"loss": 0.9835, | |
"step": 77 | |
}, | |
{ | |
"epoch": 1.232, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6891875267028809, | |
"eval_runtime": 7.0202, | |
"eval_samples_per_second": 35.612, | |
"eval_steps_per_second": 4.558, | |
"step": 77 | |
}, | |
{ | |
"epoch": 1.248, | |
"grad_norm": 7.378861904144287, | |
"learning_rate": 1.8852459016393442e-05, | |
"loss": 0.6647, | |
"step": 78 | |
}, | |
{ | |
"epoch": 1.248, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6887988448143005, | |
"eval_runtime": 7.01, | |
"eval_samples_per_second": 35.664, | |
"eval_steps_per_second": 4.565, | |
"step": 78 | |
}, | |
{ | |
"epoch": 1.264, | |
"grad_norm": 5.906654357910156, | |
"learning_rate": 1.8442622950819673e-05, | |
"loss": 0.7211, | |
"step": 79 | |
}, | |
{ | |
"epoch": 1.264, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6894453167915344, | |
"eval_runtime": 7.0095, | |
"eval_samples_per_second": 35.666, | |
"eval_steps_per_second": 4.565, | |
"step": 79 | |
}, | |
{ | |
"epoch": 1.28, | |
"grad_norm": 7.757331848144531, | |
"learning_rate": 1.8032786885245903e-05, | |
"loss": 0.7091, | |
"step": 80 | |
}, | |
{ | |
"epoch": 1.28, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6894394755363464, | |
"eval_runtime": 7.0159, | |
"eval_samples_per_second": 35.633, | |
"eval_steps_per_second": 4.561, | |
"step": 80 | |
}, | |
{ | |
"epoch": 1.296, | |
"grad_norm": 19.17230224609375, | |
"learning_rate": 1.7622950819672133e-05, | |
"loss": 0.843, | |
"step": 81 | |
}, | |
{ | |
"epoch": 1.296, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6897109150886536, | |
"eval_runtime": 7.016, | |
"eval_samples_per_second": 35.633, | |
"eval_steps_per_second": 4.561, | |
"step": 81 | |
}, | |
{ | |
"epoch": 1.312, | |
"grad_norm": 6.863316535949707, | |
"learning_rate": 1.721311475409836e-05, | |
"loss": 0.8185, | |
"step": 82 | |
}, | |
{ | |
"epoch": 1.312, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6896894574165344, | |
"eval_runtime": 7.0131, | |
"eval_samples_per_second": 35.648, | |
"eval_steps_per_second": 4.563, | |
"step": 82 | |
}, | |
{ | |
"epoch": 1.328, | |
"grad_norm": 6.566874980926514, | |
"learning_rate": 1.680327868852459e-05, | |
"loss": 0.7932, | |
"step": 83 | |
}, | |
{ | |
"epoch": 1.328, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.690066397190094, | |
"eval_runtime": 7.0194, | |
"eval_samples_per_second": 35.616, | |
"eval_steps_per_second": 4.559, | |
"step": 83 | |
}, | |
{ | |
"epoch": 1.3439999999999999, | |
"grad_norm": 7.54443359375, | |
"learning_rate": 1.6393442622950818e-05, | |
"loss": 0.703, | |
"step": 84 | |
}, | |
{ | |
"epoch": 1.3439999999999999, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6897050738334656, | |
"eval_runtime": 7.0114, | |
"eval_samples_per_second": 35.656, | |
"eval_steps_per_second": 4.564, | |
"step": 84 | |
}, | |
{ | |
"epoch": 1.3599999999999999, | |
"grad_norm": 13.671883583068848, | |
"learning_rate": 1.598360655737705e-05, | |
"loss": 0.7187, | |
"step": 85 | |
}, | |
{ | |
"epoch": 1.3599999999999999, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6892929673194885, | |
"eval_runtime": 7.0122, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.563, | |
"step": 85 | |
}, | |
{ | |
"epoch": 1.376, | |
"grad_norm": 11.296185493469238, | |
"learning_rate": 1.557377049180328e-05, | |
"loss": 0.8156, | |
"step": 86 | |
}, | |
{ | |
"epoch": 1.376, | |
"eval_accuracy": 0.58, | |
"eval_loss": 0.6897851824760437, | |
"eval_runtime": 7.0173, | |
"eval_samples_per_second": 35.626, | |
"eval_steps_per_second": 4.56, | |
"step": 86 | |
}, | |
{ | |
"epoch": 1.392, | |
"grad_norm": 12.910079002380371, | |
"learning_rate": 1.5163934426229509e-05, | |
"loss": 0.7051, | |
"step": 87 | |
}, | |
{ | |
"epoch": 1.392, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6891074180603027, | |
"eval_runtime": 7.0172, | |
"eval_samples_per_second": 35.627, | |
"eval_steps_per_second": 4.56, | |
"step": 87 | |
}, | |
{ | |
"epoch": 1.408, | |
"grad_norm": 19.143896102905273, | |
"learning_rate": 1.4754098360655739e-05, | |
"loss": 0.5656, | |
"step": 88 | |
}, | |
{ | |
"epoch": 1.408, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6875136494636536, | |
"eval_runtime": 6.966, | |
"eval_samples_per_second": 35.889, | |
"eval_steps_per_second": 4.594, | |
"step": 88 | |
}, | |
{ | |
"epoch": 1.424, | |
"grad_norm": 11.563498497009277, | |
"learning_rate": 1.4344262295081968e-05, | |
"loss": 0.6649, | |
"step": 89 | |
}, | |
{ | |
"epoch": 1.424, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.687857449054718, | |
"eval_runtime": 7.0159, | |
"eval_samples_per_second": 35.633, | |
"eval_steps_per_second": 4.561, | |
"step": 89 | |
}, | |
{ | |
"epoch": 1.44, | |
"grad_norm": 22.282468795776367, | |
"learning_rate": 1.3934426229508196e-05, | |
"loss": 0.6697, | |
"step": 90 | |
}, | |
{ | |
"epoch": 1.44, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6870273351669312, | |
"eval_runtime": 7.0118, | |
"eval_samples_per_second": 35.654, | |
"eval_steps_per_second": 4.564, | |
"step": 90 | |
}, | |
{ | |
"epoch": 1.456, | |
"grad_norm": 11.898663520812988, | |
"learning_rate": 1.3524590163934428e-05, | |
"loss": 0.7745, | |
"step": 91 | |
}, | |
{ | |
"epoch": 1.456, | |
"eval_accuracy": 0.596, | |
"eval_loss": 0.6867011785507202, | |
"eval_runtime": 7.0125, | |
"eval_samples_per_second": 35.651, | |
"eval_steps_per_second": 4.563, | |
"step": 91 | |
}, | |
{ | |
"epoch": 1.472, | |
"grad_norm": 8.677886962890625, | |
"learning_rate": 1.3114754098360657e-05, | |
"loss": 0.7695, | |
"step": 92 | |
}, | |
{ | |
"epoch": 1.472, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6872344017028809, | |
"eval_runtime": 7.0145, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 92 | |
}, | |
{ | |
"epoch": 1.488, | |
"grad_norm": 4.995587348937988, | |
"learning_rate": 1.2704918032786885e-05, | |
"loss": 0.5541, | |
"step": 93 | |
}, | |
{ | |
"epoch": 1.488, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6873027086257935, | |
"eval_runtime": 7.0143, | |
"eval_samples_per_second": 35.641, | |
"eval_steps_per_second": 4.562, | |
"step": 93 | |
}, | |
{ | |
"epoch": 1.504, | |
"grad_norm": 9.183490753173828, | |
"learning_rate": 1.2295081967213116e-05, | |
"loss": 0.5547, | |
"step": 94 | |
}, | |
{ | |
"epoch": 1.504, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6867578029632568, | |
"eval_runtime": 7.0101, | |
"eval_samples_per_second": 35.663, | |
"eval_steps_per_second": 4.565, | |
"step": 94 | |
}, | |
{ | |
"epoch": 1.52, | |
"grad_norm": 17.053442001342773, | |
"learning_rate": 1.1885245901639344e-05, | |
"loss": 0.6949, | |
"step": 95 | |
}, | |
{ | |
"epoch": 1.52, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6869179606437683, | |
"eval_runtime": 7.015, | |
"eval_samples_per_second": 35.638, | |
"eval_steps_per_second": 4.562, | |
"step": 95 | |
}, | |
{ | |
"epoch": 1.536, | |
"grad_norm": 11.609779357910156, | |
"learning_rate": 1.1475409836065575e-05, | |
"loss": 0.7494, | |
"step": 96 | |
}, | |
{ | |
"epoch": 1.536, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6875781416893005, | |
"eval_runtime": 7.0146, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 96 | |
}, | |
{ | |
"epoch": 1.552, | |
"grad_norm": 7.269466400146484, | |
"learning_rate": 1.1065573770491803e-05, | |
"loss": 0.7667, | |
"step": 97 | |
}, | |
{ | |
"epoch": 1.552, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6860898733139038, | |
"eval_runtime": 7.0103, | |
"eval_samples_per_second": 35.662, | |
"eval_steps_per_second": 4.565, | |
"step": 97 | |
}, | |
{ | |
"epoch": 1.568, | |
"grad_norm": 7.489114284515381, | |
"learning_rate": 1.0655737704918032e-05, | |
"loss": 0.6479, | |
"step": 98 | |
}, | |
{ | |
"epoch": 1.568, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.686941385269165, | |
"eval_runtime": 7.0121, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.564, | |
"step": 98 | |
}, | |
{ | |
"epoch": 1.584, | |
"grad_norm": 12.461353302001953, | |
"learning_rate": 1.0245901639344262e-05, | |
"loss": 0.5956, | |
"step": 99 | |
}, | |
{ | |
"epoch": 1.584, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6870800852775574, | |
"eval_runtime": 7.0119, | |
"eval_samples_per_second": 35.654, | |
"eval_steps_per_second": 4.564, | |
"step": 99 | |
}, | |
{ | |
"epoch": 1.6, | |
"grad_norm": 10.943467140197754, | |
"learning_rate": 9.836065573770493e-06, | |
"loss": 0.6405, | |
"step": 100 | |
}, | |
{ | |
"epoch": 1.6, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6868281364440918, | |
"eval_runtime": 7.0172, | |
"eval_samples_per_second": 35.627, | |
"eval_steps_per_second": 4.56, | |
"step": 100 | |
}, | |
{ | |
"epoch": 1.616, | |
"grad_norm": 14.788191795349121, | |
"learning_rate": 9.426229508196721e-06, | |
"loss": 0.7084, | |
"step": 101 | |
}, | |
{ | |
"epoch": 1.616, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6861425638198853, | |
"eval_runtime": 7.0188, | |
"eval_samples_per_second": 35.619, | |
"eval_steps_per_second": 4.559, | |
"step": 101 | |
}, | |
{ | |
"epoch": 1.6320000000000001, | |
"grad_norm": 8.610825538635254, | |
"learning_rate": 9.016393442622952e-06, | |
"loss": 0.5918, | |
"step": 102 | |
}, | |
{ | |
"epoch": 1.6320000000000001, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6875644326210022, | |
"eval_runtime": 7.0178, | |
"eval_samples_per_second": 35.624, | |
"eval_steps_per_second": 4.56, | |
"step": 102 | |
}, | |
{ | |
"epoch": 1.6480000000000001, | |
"grad_norm": 8.98721981048584, | |
"learning_rate": 8.60655737704918e-06, | |
"loss": 0.6359, | |
"step": 103 | |
}, | |
{ | |
"epoch": 1.6480000000000001, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.687570333480835, | |
"eval_runtime": 7.0145, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 103 | |
}, | |
{ | |
"epoch": 1.6640000000000001, | |
"grad_norm": 7.4758992195129395, | |
"learning_rate": 8.196721311475409e-06, | |
"loss": 0.6349, | |
"step": 104 | |
}, | |
{ | |
"epoch": 1.6640000000000001, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6866093873977661, | |
"eval_runtime": 7.0157, | |
"eval_samples_per_second": 35.635, | |
"eval_steps_per_second": 4.561, | |
"step": 104 | |
}, | |
{ | |
"epoch": 1.6800000000000002, | |
"grad_norm": 5.330641269683838, | |
"learning_rate": 7.78688524590164e-06, | |
"loss": 0.5346, | |
"step": 105 | |
}, | |
{ | |
"epoch": 1.6800000000000002, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6873124837875366, | |
"eval_runtime": 7.013, | |
"eval_samples_per_second": 35.648, | |
"eval_steps_per_second": 4.563, | |
"step": 105 | |
}, | |
{ | |
"epoch": 1.696, | |
"grad_norm": 7.447720050811768, | |
"learning_rate": 7.3770491803278695e-06, | |
"loss": 0.7078, | |
"step": 106 | |
}, | |
{ | |
"epoch": 1.696, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6875371336936951, | |
"eval_runtime": 7.0195, | |
"eval_samples_per_second": 35.615, | |
"eval_steps_per_second": 4.559, | |
"step": 106 | |
}, | |
{ | |
"epoch": 1.712, | |
"grad_norm": 6.726109504699707, | |
"learning_rate": 6.967213114754098e-06, | |
"loss": 0.7211, | |
"step": 107 | |
}, | |
{ | |
"epoch": 1.712, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6877539157867432, | |
"eval_runtime": 7.0154, | |
"eval_samples_per_second": 35.636, | |
"eval_steps_per_second": 4.561, | |
"step": 107 | |
}, | |
{ | |
"epoch": 1.728, | |
"grad_norm": 8.027348518371582, | |
"learning_rate": 6.557377049180328e-06, | |
"loss": 0.6603, | |
"step": 108 | |
}, | |
{ | |
"epoch": 1.728, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6869003772735596, | |
"eval_runtime": 7.0216, | |
"eval_samples_per_second": 35.605, | |
"eval_steps_per_second": 4.557, | |
"step": 108 | |
}, | |
{ | |
"epoch": 1.744, | |
"grad_norm": 5.765665531158447, | |
"learning_rate": 6.147540983606558e-06, | |
"loss": 0.6232, | |
"step": 109 | |
}, | |
{ | |
"epoch": 1.744, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6870996356010437, | |
"eval_runtime": 7.0173, | |
"eval_samples_per_second": 35.626, | |
"eval_steps_per_second": 4.56, | |
"step": 109 | |
}, | |
{ | |
"epoch": 1.76, | |
"grad_norm": 7.992295742034912, | |
"learning_rate": 5.737704918032787e-06, | |
"loss": 0.5974, | |
"step": 110 | |
}, | |
{ | |
"epoch": 1.76, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6865136623382568, | |
"eval_runtime": 7.015, | |
"eval_samples_per_second": 35.638, | |
"eval_steps_per_second": 4.562, | |
"step": 110 | |
}, | |
{ | |
"epoch": 1.776, | |
"grad_norm": 7.537731647491455, | |
"learning_rate": 5.327868852459016e-06, | |
"loss": 0.7903, | |
"step": 111 | |
}, | |
{ | |
"epoch": 1.776, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6870468854904175, | |
"eval_runtime": 7.0136, | |
"eval_samples_per_second": 35.645, | |
"eval_steps_per_second": 4.563, | |
"step": 111 | |
}, | |
{ | |
"epoch": 1.792, | |
"grad_norm": 10.202421188354492, | |
"learning_rate": 4.918032786885246e-06, | |
"loss": 0.7646, | |
"step": 112 | |
}, | |
{ | |
"epoch": 1.792, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6872812509536743, | |
"eval_runtime": 7.014, | |
"eval_samples_per_second": 35.643, | |
"eval_steps_per_second": 4.562, | |
"step": 112 | |
}, | |
{ | |
"epoch": 1.808, | |
"grad_norm": 5.8284993171691895, | |
"learning_rate": 4.508196721311476e-06, | |
"loss": 0.7683, | |
"step": 113 | |
}, | |
{ | |
"epoch": 1.808, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6874414086341858, | |
"eval_runtime": 7.0182, | |
"eval_samples_per_second": 35.622, | |
"eval_steps_per_second": 4.56, | |
"step": 113 | |
}, | |
{ | |
"epoch": 1.8239999999999998, | |
"grad_norm": 14.522705078125, | |
"learning_rate": 4.098360655737704e-06, | |
"loss": 0.7617, | |
"step": 114 | |
}, | |
{ | |
"epoch": 1.8239999999999998, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.687423825263977, | |
"eval_runtime": 7.0145, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 114 | |
}, | |
{ | |
"epoch": 1.8399999999999999, | |
"grad_norm": 13.447626113891602, | |
"learning_rate": 3.6885245901639347e-06, | |
"loss": 0.6276, | |
"step": 115 | |
}, | |
{ | |
"epoch": 1.8399999999999999, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6862011551856995, | |
"eval_runtime": 7.0162, | |
"eval_samples_per_second": 35.632, | |
"eval_steps_per_second": 4.561, | |
"step": 115 | |
}, | |
{ | |
"epoch": 1.8559999999999999, | |
"grad_norm": 6.554261684417725, | |
"learning_rate": 3.278688524590164e-06, | |
"loss": 0.7358, | |
"step": 116 | |
}, | |
{ | |
"epoch": 1.8559999999999999, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.686761736869812, | |
"eval_runtime": 7.0114, | |
"eval_samples_per_second": 35.656, | |
"eval_steps_per_second": 4.564, | |
"step": 116 | |
}, | |
{ | |
"epoch": 1.8719999999999999, | |
"grad_norm": 4.159426689147949, | |
"learning_rate": 2.8688524590163937e-06, | |
"loss": 0.5127, | |
"step": 117 | |
}, | |
{ | |
"epoch": 1.8719999999999999, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6867890357971191, | |
"eval_runtime": 7.0154, | |
"eval_samples_per_second": 35.636, | |
"eval_steps_per_second": 4.561, | |
"step": 117 | |
}, | |
{ | |
"epoch": 1.888, | |
"grad_norm": 8.290789604187012, | |
"learning_rate": 2.459016393442623e-06, | |
"loss": 0.8021, | |
"step": 118 | |
}, | |
{ | |
"epoch": 1.888, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6858710646629333, | |
"eval_runtime": 7.0128, | |
"eval_samples_per_second": 35.649, | |
"eval_steps_per_second": 4.563, | |
"step": 118 | |
}, | |
{ | |
"epoch": 1.904, | |
"grad_norm": 10.31582260131836, | |
"learning_rate": 2.049180327868852e-06, | |
"loss": 0.5137, | |
"step": 119 | |
}, | |
{ | |
"epoch": 1.904, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6865000128746033, | |
"eval_runtime": 7.0153, | |
"eval_samples_per_second": 35.637, | |
"eval_steps_per_second": 4.561, | |
"step": 119 | |
}, | |
{ | |
"epoch": 1.92, | |
"grad_norm": 22.238492965698242, | |
"learning_rate": 1.639344262295082e-06, | |
"loss": 0.8707, | |
"step": 120 | |
}, | |
{ | |
"epoch": 1.92, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.686654269695282, | |
"eval_runtime": 7.0123, | |
"eval_samples_per_second": 35.652, | |
"eval_steps_per_second": 4.563, | |
"step": 120 | |
}, | |
{ | |
"epoch": 1.936, | |
"grad_norm": 12.90554141998291, | |
"learning_rate": 1.2295081967213116e-06, | |
"loss": 0.5289, | |
"step": 121 | |
}, | |
{ | |
"epoch": 1.936, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6870156526565552, | |
"eval_runtime": 7.0116, | |
"eval_samples_per_second": 35.655, | |
"eval_steps_per_second": 4.564, | |
"step": 121 | |
}, | |
{ | |
"epoch": 1.952, | |
"grad_norm": 4.567208766937256, | |
"learning_rate": 8.19672131147541e-07, | |
"loss": 0.6272, | |
"step": 122 | |
}, | |
{ | |
"epoch": 1.952, | |
"eval_accuracy": 0.588, | |
"eval_loss": 0.6864765882492065, | |
"eval_runtime": 7.0146, | |
"eval_samples_per_second": 35.64, | |
"eval_steps_per_second": 4.562, | |
"step": 122 | |
}, | |
{ | |
"epoch": 1.968, | |
"grad_norm": 4.912770748138428, | |
"learning_rate": 4.098360655737705e-07, | |
"loss": 0.7425, | |
"step": 123 | |
}, | |
{ | |
"epoch": 1.968, | |
"eval_accuracy": 0.584, | |
"eval_loss": 0.6869745850563049, | |
"eval_runtime": 7.0106, | |
"eval_samples_per_second": 35.66, | |
"eval_steps_per_second": 4.565, | |
"step": 123 | |
}, | |
{ | |
"epoch": 1.984, | |
"grad_norm": 10.384734153747559, | |
"learning_rate": 0.0, | |
"loss": 0.5696, | |
"step": 124 | |
}, | |
{ | |
"epoch": 1.984, | |
"eval_accuracy": 0.592, | |
"eval_loss": 0.6866250038146973, | |
"eval_runtime": 7.0175, | |
"eval_samples_per_second": 35.625, | |
"eval_steps_per_second": 4.56, | |
"step": 124 | |
}, | |
{ | |
"epoch": 1.984, | |
"step": 124, | |
"total_flos": 41199005827072.0, | |
"train_loss": 0.7272295798024824, | |
"train_runtime": 1167.0627, | |
"train_samples_per_second": 1.714, | |
"train_steps_per_second": 0.106 | |
} | |
], | |
"logging_steps": 1, | |
"max_steps": 124, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 2, | |
"save_steps": 500, | |
"total_flos": 41199005827072.0, | |
"train_batch_size": 2, | |
"trial_name": null, | |
"trial_params": null | |
} | |