{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 1,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 1.7043088674545288,
      "learning_rate": 2.5e-05,
      "loss": 0.6947,
      "step": 1
    },
    {
      "epoch": 0.14285714285714285,
      "eval_loss": 0.6769030690193176,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.2011,
      "eval_samples_per_second": 100.403,
      "eval_steps_per_second": 1.817,
      "step": 1
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 4.877361297607422,
      "learning_rate": 5e-05,
      "loss": 0.7363,
      "step": 2
    },
    {
      "epoch": 0.2857142857142857,
      "eval_loss": 0.676080048084259,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.1153,
      "eval_samples_per_second": 104.476,
      "eval_steps_per_second": 1.891,
      "step": 2
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 2.641645669937134,
      "learning_rate": 4.9264705882352944e-05,
      "loss": 0.6637,
      "step": 3
    },
    {
      "epoch": 0.42857142857142855,
      "eval_loss": 0.6744058728218079,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.2596,
      "eval_samples_per_second": 97.804,
      "eval_steps_per_second": 1.77,
      "step": 3
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 2.2982332706451416,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 0.6983,
      "step": 4
    },
    {
      "epoch": 0.5714285714285714,
      "eval_loss": 0.6736607551574707,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.2445,
      "eval_samples_per_second": 98.464,
      "eval_steps_per_second": 1.782,
      "step": 4
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 3.595609188079834,
      "learning_rate": 4.7794117647058826e-05,
      "loss": 0.7507,
      "step": 5
    },
    {
      "epoch": 0.7142857142857143,
      "eval_loss": 0.6729145050048828,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.1911,
      "eval_samples_per_second": 100.862,
      "eval_steps_per_second": 1.826,
      "step": 5
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 5.971331596374512,
      "learning_rate": 4.705882352941177e-05,
      "loss": 0.7397,
      "step": 6
    },
    {
      "epoch": 0.8571428571428571,
      "eval_loss": 0.6732840538024902,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.2342,
      "eval_samples_per_second": 98.916,
      "eval_steps_per_second": 1.79,
      "step": 6
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.127521991729736,
      "learning_rate": 4.632352941176471e-05,
      "loss": 0.7212,
      "step": 7
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6744644641876221,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.3962,
      "eval_samples_per_second": 92.229,
      "eval_steps_per_second": 1.669,
      "step": 7
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 3.9879262447357178,
      "learning_rate": 4.558823529411765e-05,
      "loss": 0.6913,
      "step": 8
    },
    {
      "epoch": 1.1428571428571428,
      "eval_loss": 0.6768964529037476,
      "eval_matthews_correlation": 0.09238808374290705,
      "eval_runtime": 2.3314,
      "eval_samples_per_second": 94.793,
      "eval_steps_per_second": 1.716,
      "step": 8
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 6.443199157714844,
      "learning_rate": 4.485294117647059e-05,
      "loss": 0.7176,
      "step": 9
    },
    {
      "epoch": 1.2857142857142856,
      "eval_loss": 0.6795554757118225,
      "eval_matthews_correlation": 0.11327087190726348,
      "eval_runtime": 2.2186,
      "eval_samples_per_second": 99.61,
      "eval_steps_per_second": 1.803,
      "step": 9
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 10.946983337402344,
      "learning_rate": 4.411764705882353e-05,
      "loss": 0.7181,
      "step": 10
    },
    {
      "epoch": 1.4285714285714286,
      "eval_loss": 0.6791008710861206,
      "eval_matthews_correlation": 0.11327087190726348,
      "eval_runtime": 2.218,
      "eval_samples_per_second": 99.641,
      "eval_steps_per_second": 1.803,
      "step": 10
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 2.7849884033203125,
      "learning_rate": 4.3382352941176474e-05,
      "loss": 0.7125,
      "step": 11
    },
    {
      "epoch": 1.5714285714285714,
      "eval_loss": 0.6784502267837524,
      "eval_matthews_correlation": 0.11327087190726348,
      "eval_runtime": 2.1228,
      "eval_samples_per_second": 104.108,
      "eval_steps_per_second": 1.884,
      "step": 11
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 2.3483753204345703,
      "learning_rate": 4.2647058823529415e-05,
      "loss": 0.7265,
      "step": 12
    },
    {
      "epoch": 1.7142857142857144,
      "eval_loss": 0.6778128147125244,
      "eval_matthews_correlation": 0.11327087190726348,
      "eval_runtime": 2.2267,
      "eval_samples_per_second": 99.251,
      "eval_steps_per_second": 1.796,
      "step": 12
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 4.630273342132568,
      "learning_rate": 4.1911764705882356e-05,
      "loss": 0.7415,
      "step": 13
    },
    {
      "epoch": 1.8571428571428572,
      "eval_loss": 0.6771256923675537,
      "eval_matthews_correlation": 0.11327087190726348,
      "eval_runtime": 2.1725,
      "eval_samples_per_second": 101.727,
      "eval_steps_per_second": 1.841,
      "step": 13
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.348604202270508,
      "learning_rate": 4.11764705882353e-05,
      "loss": 0.7181,
      "step": 14
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6757624745368958,
      "eval_matthews_correlation": 0.11041472213274754,
      "eval_runtime": 2.2246,
      "eval_samples_per_second": 99.342,
      "eval_steps_per_second": 1.798,
      "step": 14
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 1.6913690567016602,
      "learning_rate": 4.044117647058824e-05,
      "loss": 0.7429,
      "step": 15
    },
    {
      "epoch": 2.142857142857143,
      "eval_loss": 0.6753581762313843,
      "eval_matthews_correlation": 0.11041472213274754,
      "eval_runtime": 2.2185,
      "eval_samples_per_second": 99.619,
      "eval_steps_per_second": 1.803,
      "step": 15
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 1.92578125,
      "learning_rate": 3.970588235294117e-05,
      "loss": 0.7382,
      "step": 16
    },
    {
      "epoch": 2.2857142857142856,
      "eval_loss": 0.6744953393936157,
      "eval_matthews_correlation": 0.10763938915346385,
      "eval_runtime": 2.2226,
      "eval_samples_per_second": 99.431,
      "eval_steps_per_second": 1.8,
      "step": 16
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 6.288422107696533,
      "learning_rate": 3.897058823529412e-05,
      "loss": 0.6959,
      "step": 17
    },
    {
      "epoch": 2.4285714285714284,
      "eval_loss": 0.6736353635787964,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.1746,
      "eval_samples_per_second": 101.626,
      "eval_steps_per_second": 1.839,
      "step": 17
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 4.723521709442139,
      "learning_rate": 3.8235294117647055e-05,
      "loss": 0.7533,
      "step": 18
    },
    {
      "epoch": 2.571428571428571,
      "eval_loss": 0.6728902459144592,
      "eval_matthews_correlation": 0.08880213151158615,
      "eval_runtime": 2.1772,
      "eval_samples_per_second": 101.508,
      "eval_steps_per_second": 1.837,
      "step": 18
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 2.707686424255371,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.686,
      "step": 19
    },
    {
      "epoch": 2.7142857142857144,
      "eval_loss": 0.6713541150093079,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2193,
      "eval_samples_per_second": 99.58,
      "eval_steps_per_second": 1.802,
      "step": 19
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 1.881134271621704,
      "learning_rate": 3.6764705882352945e-05,
      "loss": 0.7681,
      "step": 20
    },
    {
      "epoch": 2.857142857142857,
      "eval_loss": 0.6703792214393616,
      "eval_matthews_correlation": 0.08518920119621938,
      "eval_runtime": 2.1741,
      "eval_samples_per_second": 101.652,
      "eval_steps_per_second": 1.84,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.6602678298950195,
      "learning_rate": 3.6029411764705886e-05,
      "loss": 0.7201,
      "step": 21
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6689287424087524,
      "eval_matthews_correlation": 0.08518920119621938,
      "eval_runtime": 2.1719,
      "eval_samples_per_second": 101.752,
      "eval_steps_per_second": 1.842,
      "step": 21
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 6.099279403686523,
      "learning_rate": 3.529411764705883e-05,
      "loss": 0.6883,
      "step": 22
    },
    {
      "epoch": 3.142857142857143,
      "eval_loss": 0.6681427359580994,
      "eval_matthews_correlation": 0.0642428489899374,
      "eval_runtime": 2.2222,
      "eval_samples_per_second": 99.45,
      "eval_steps_per_second": 1.8,
      "step": 22
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 1.872830867767334,
      "learning_rate": 3.455882352941177e-05,
      "loss": 0.7143,
      "step": 23
    },
    {
      "epoch": 3.2857142857142856,
      "eval_loss": 0.6677550077438354,
      "eval_matthews_correlation": 0.04194812825413103,
      "eval_runtime": 2.2213,
      "eval_samples_per_second": 99.49,
      "eval_steps_per_second": 1.801,
      "step": 23
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 3.6619858741760254,
      "learning_rate": 3.382352941176471e-05,
      "loss": 0.7078,
      "step": 24
    },
    {
      "epoch": 3.4285714285714284,
      "eval_loss": 0.6670694947242737,
      "eval_matthews_correlation": 0.04194812825413103,
      "eval_runtime": 2.227,
      "eval_samples_per_second": 99.236,
      "eval_steps_per_second": 1.796,
      "step": 24
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 2.1140382289886475,
      "learning_rate": 3.308823529411765e-05,
      "loss": 0.6675,
      "step": 25
    },
    {
      "epoch": 3.571428571428571,
      "eval_loss": 0.6676743626594543,
      "eval_matthews_correlation": 0.04194812825413103,
      "eval_runtime": 2.1762,
      "eval_samples_per_second": 101.552,
      "eval_steps_per_second": 1.838,
      "step": 25
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 1.8528276681900024,
      "learning_rate": 3.235294117647059e-05,
      "loss": 0.7156,
      "step": 26
    },
    {
      "epoch": 3.7142857142857144,
      "eval_loss": 0.6686509251594543,
      "eval_matthews_correlation": 0.0642428489899374,
      "eval_runtime": 2.2254,
      "eval_samples_per_second": 99.306,
      "eval_steps_per_second": 1.797,
      "step": 26
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 6.78858757019043,
      "learning_rate": 3.161764705882353e-05,
      "loss": 0.6797,
      "step": 27
    },
    {
      "epoch": 3.857142857142857,
      "eval_loss": 0.6698384881019592,
      "eval_matthews_correlation": 0.08518920119621938,
      "eval_runtime": 2.221,
      "eval_samples_per_second": 99.505,
      "eval_steps_per_second": 1.801,
      "step": 27
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.9471256732940674,
      "learning_rate": 3.0882352941176475e-05,
      "loss": 0.7278,
      "step": 28
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.6707520484924316,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.225,
      "eval_samples_per_second": 99.328,
      "eval_steps_per_second": 1.798,
      "step": 28
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 8.322439193725586,
      "learning_rate": 3.0147058823529413e-05,
      "loss": 0.6667,
      "step": 29
    },
    {
      "epoch": 4.142857142857143,
      "eval_loss": 0.6720418334007263,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.1722,
      "eval_samples_per_second": 101.738,
      "eval_steps_per_second": 1.841,
      "step": 29
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 4.192855358123779,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.7602,
      "step": 30
    },
    {
      "epoch": 4.285714285714286,
      "eval_loss": 0.6730735898017883,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.1749,
      "eval_samples_per_second": 101.616,
      "eval_steps_per_second": 1.839,
      "step": 30
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 3.558943033218384,
      "learning_rate": 2.8676470588235295e-05,
      "loss": 0.734,
      "step": 31
    },
    {
      "epoch": 4.428571428571429,
      "eval_loss": 0.6732144355773926,
      "eval_matthews_correlation": 0.10763938915346385,
      "eval_runtime": 2.2232,
      "eval_samples_per_second": 99.407,
      "eval_steps_per_second": 1.799,
      "step": 31
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 1.8682119846343994,
      "learning_rate": 2.7941176470588236e-05,
      "loss": 0.6987,
      "step": 32
    },
    {
      "epoch": 4.571428571428571,
      "eval_loss": 0.6731299757957458,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.2265,
      "eval_samples_per_second": 99.26,
      "eval_steps_per_second": 1.797,
      "step": 32
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 2.700486660003662,
      "learning_rate": 2.7205882352941174e-05,
      "loss": 0.6819,
      "step": 33
    },
    {
      "epoch": 4.714285714285714,
      "eval_loss": 0.6740529537200928,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.1707,
      "eval_samples_per_second": 101.808,
      "eval_steps_per_second": 1.843,
      "step": 33
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 1.0778151750564575,
      "learning_rate": 2.647058823529412e-05,
      "loss": 0.6904,
      "step": 34
    },
    {
      "epoch": 4.857142857142857,
      "eval_loss": 0.6740065217018127,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.22,
      "eval_samples_per_second": 99.549,
      "eval_steps_per_second": 1.802,
      "step": 34
    },
    {
      "epoch": 5.0,
      "grad_norm": 6.791481971740723,
      "learning_rate": 2.5735294117647057e-05,
      "loss": 0.7008,
      "step": 35
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.6742716431617737,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.227,
      "eval_samples_per_second": 99.237,
      "eval_steps_per_second": 1.796,
      "step": 35
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 3.210080623626709,
      "learning_rate": 2.5e-05,
      "loss": 0.6827,
      "step": 36
    },
    {
      "epoch": 5.142857142857143,
      "eval_loss": 0.6741186380386353,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.2252,
      "eval_samples_per_second": 99.317,
      "eval_steps_per_second": 1.798,
      "step": 36
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 2.8035035133361816,
      "learning_rate": 2.4264705882352942e-05,
      "loss": 0.7384,
      "step": 37
    },
    {
      "epoch": 5.285714285714286,
      "eval_loss": 0.6738977432250977,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.2235,
      "eval_samples_per_second": 99.394,
      "eval_steps_per_second": 1.799,
      "step": 37
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 2.6973655223846436,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 0.7279,
      "step": 38
    },
    {
      "epoch": 5.428571428571429,
      "eval_loss": 0.6738795042037964,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.2227,
      "eval_samples_per_second": 99.427,
      "eval_steps_per_second": 1.8,
      "step": 38
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 2.436616897583008,
      "learning_rate": 2.2794117647058825e-05,
      "loss": 0.7384,
      "step": 39
    },
    {
      "epoch": 5.571428571428571,
      "eval_loss": 0.6723892688751221,
      "eval_matthews_correlation": 0.12378478937978676,
      "eval_runtime": 2.2269,
      "eval_samples_per_second": 99.241,
      "eval_steps_per_second": 1.796,
      "step": 39
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 7.856032848358154,
      "learning_rate": 2.2058823529411766e-05,
      "loss": 0.7357,
      "step": 40
    },
    {
      "epoch": 5.714285714285714,
      "eval_loss": 0.6720584034919739,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.226,
      "eval_samples_per_second": 99.28,
      "eval_steps_per_second": 1.797,
      "step": 40
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 2.9936225414276123,
      "learning_rate": 2.1323529411764707e-05,
      "loss": 0.6712,
      "step": 41
    },
    {
      "epoch": 5.857142857142857,
      "eval_loss": 0.6708343625068665,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2248,
      "eval_samples_per_second": 99.333,
      "eval_steps_per_second": 1.798,
      "step": 41
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.1198043823242188,
      "learning_rate": 2.058823529411765e-05,
      "loss": 0.6939,
      "step": 42
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.6708316206932068,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2268,
      "eval_samples_per_second": 99.244,
      "eval_steps_per_second": 1.796,
      "step": 42
    },
    {
      "epoch": 6.142857142857143,
      "grad_norm": 1.7392616271972656,
      "learning_rate": 1.9852941176470586e-05,
      "loss": 0.765,
      "step": 43
    },
    {
      "epoch": 6.142857142857143,
      "eval_loss": 0.6702786684036255,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1756,
      "eval_samples_per_second": 101.582,
      "eval_steps_per_second": 1.839,
      "step": 43
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 1.317233681678772,
      "learning_rate": 1.9117647058823528e-05,
      "loss": 0.7314,
      "step": 44
    },
    {
      "epoch": 6.285714285714286,
      "eval_loss": 0.6706194877624512,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2211,
      "eval_samples_per_second": 99.501,
      "eval_steps_per_second": 1.801,
      "step": 44
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 1.396705150604248,
      "learning_rate": 1.8382352941176472e-05,
      "loss": 0.7446,
      "step": 45
    },
    {
      "epoch": 6.428571428571429,
      "eval_loss": 0.6693071126937866,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2256,
      "eval_samples_per_second": 99.298,
      "eval_steps_per_second": 1.797,
      "step": 45
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 8.453911781311035,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.6301,
      "step": 46
    },
    {
      "epoch": 6.571428571428571,
      "eval_loss": 0.6696462631225586,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1757,
      "eval_samples_per_second": 101.577,
      "eval_steps_per_second": 1.838,
      "step": 46
    },
    {
      "epoch": 6.714285714285714,
      "grad_norm": 1.2325752973556519,
      "learning_rate": 1.6911764705882355e-05,
      "loss": 0.7086,
      "step": 47
    },
    {
      "epoch": 6.714285714285714,
      "eval_loss": 0.6690707206726074,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1688,
      "eval_samples_per_second": 101.902,
      "eval_steps_per_second": 1.844,
      "step": 47
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 1.1442803144454956,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.6812,
      "step": 48
    },
    {
      "epoch": 6.857142857142857,
      "eval_loss": 0.6692214608192444,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1724,
      "eval_samples_per_second": 101.731,
      "eval_steps_per_second": 1.841,
      "step": 48
    },
    {
      "epoch": 7.0,
      "grad_norm": 2.70233416557312,
      "learning_rate": 1.5441176470588237e-05,
      "loss": 0.7021,
      "step": 49
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.6698616743087769,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2295,
      "eval_samples_per_second": 99.127,
      "eval_steps_per_second": 1.794,
      "step": 49
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 1.5815727710723877,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 0.7062,
      "step": 50
    },
    {
      "epoch": 7.142857142857143,
      "eval_loss": 0.6693673133850098,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1742,
      "eval_samples_per_second": 101.649,
      "eval_steps_per_second": 1.84,
      "step": 50
    },
    {
      "epoch": 7.285714285714286,
      "grad_norm": 1.2061761617660522,
      "learning_rate": 1.3970588235294118e-05,
      "loss": 0.7377,
      "step": 51
    },
    {
      "epoch": 7.285714285714286,
      "eval_loss": 0.6697478890419006,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1746,
      "eval_samples_per_second": 101.628,
      "eval_steps_per_second": 1.839,
      "step": 51
    },
    {
      "epoch": 7.428571428571429,
      "grad_norm": 1.4450395107269287,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.6958,
      "step": 52
    },
    {
      "epoch": 7.428571428571429,
      "eval_loss": 0.670606791973114,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.1743,
      "eval_samples_per_second": 101.643,
      "eval_steps_per_second": 1.84,
      "step": 52
    },
    {
      "epoch": 7.571428571428571,
      "grad_norm": 1.7913070917129517,
      "learning_rate": 1.25e-05,
      "loss": 0.7167,
      "step": 53
    },
    {
      "epoch": 7.571428571428571,
      "eval_loss": 0.6707344055175781,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2214,
      "eval_samples_per_second": 99.486,
      "eval_steps_per_second": 1.801,
      "step": 53
    },
    {
      "epoch": 7.714285714285714,
      "grad_norm": 1.905948519706726,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.7437,
      "step": 54
    },
    {
      "epoch": 7.714285714285714,
      "eval_loss": 0.6710696816444397,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2214,
      "eval_samples_per_second": 99.485,
      "eval_steps_per_second": 1.801,
      "step": 54
    },
    {
      "epoch": 7.857142857142857,
      "grad_norm": 5.172789096832275,
      "learning_rate": 1.1029411764705883e-05,
      "loss": 0.6801,
      "step": 55
    },
    {
      "epoch": 7.857142857142857,
      "eval_loss": 0.6719600558280945,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2219,
      "eval_samples_per_second": 99.466,
      "eval_steps_per_second": 1.8,
      "step": 55
    },
    {
      "epoch": 8.0,
      "grad_norm": 7.617390155792236,
      "learning_rate": 1.0294117647058824e-05,
      "loss": 0.6581,
      "step": 56
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.6720495223999023,
      "eval_matthews_correlation": 0.08880213151158615,
      "eval_runtime": 2.2243,
      "eval_samples_per_second": 99.358,
      "eval_steps_per_second": 1.798,
      "step": 56
    },
    {
      "epoch": 8.142857142857142,
      "grad_norm": 7.8047776222229,
      "learning_rate": 9.558823529411764e-06,
      "loss": 0.6782,
      "step": 57
    },
    {
      "epoch": 8.142857142857142,
      "eval_loss": 0.6721069812774658,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2247,
      "eval_samples_per_second": 99.338,
      "eval_steps_per_second": 1.798,
      "step": 57
    },
    {
      "epoch": 8.285714285714286,
      "grad_norm": 4.3581438064575195,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.6347,
      "step": 58
    },
    {
      "epoch": 8.285714285714286,
      "eval_loss": 0.6721727252006531,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2229,
      "eval_samples_per_second": 99.419,
      "eval_steps_per_second": 1.799,
      "step": 58
    },
    {
      "epoch": 8.428571428571429,
      "grad_norm": 2.5159852504730225,
      "learning_rate": 8.088235294117648e-06,
      "loss": 0.7283,
      "step": 59
    },
    {
      "epoch": 8.428571428571429,
      "eval_loss": 0.6728233695030212,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2205,
      "eval_samples_per_second": 99.525,
      "eval_steps_per_second": 1.801,
      "step": 59
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 1.5487360954284668,
      "learning_rate": 7.3529411764705884e-06,
      "loss": 0.6773,
      "step": 60
    },
    {
      "epoch": 8.571428571428571,
      "eval_loss": 0.6722964644432068,
      "eval_matthews_correlation": 0.10498393965092134,
      "eval_runtime": 2.2257,
      "eval_samples_per_second": 99.293,
      "eval_steps_per_second": 1.797,
      "step": 60
    },
    {
      "epoch": 8.714285714285714,
      "grad_norm": 1.9800678491592407,
      "learning_rate": 6.61764705882353e-06,
      "loss": 0.6655,
      "step": 61
    },
    {
      "epoch": 8.714285714285714,
      "eval_loss": 0.6734304428100586,
      "eval_matthews_correlation": 0.08880213151158615,
      "eval_runtime": 2.222,
      "eval_samples_per_second": 99.46,
      "eval_steps_per_second": 1.8,
      "step": 61
    },
    {
      "epoch": 8.857142857142858,
      "grad_norm": 1.3144088983535767,
      "learning_rate": 5.882352941176471e-06,
      "loss": 0.6929,
      "step": 62
    },
    {
      "epoch": 8.857142857142858,
      "eval_loss": 0.6746340394020081,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.2247,
      "eval_samples_per_second": 99.339,
      "eval_steps_per_second": 1.798,
      "step": 62
    },
    {
      "epoch": 9.0,
      "grad_norm": 8.409367561340332,
      "learning_rate": 5.147058823529412e-06,
      "loss": 0.6826,
      "step": 63
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.6751106977462769,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.1764,
      "eval_samples_per_second": 101.543,
      "eval_steps_per_second": 1.838,
      "step": 63
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 2.5909886360168457,
      "learning_rate": 4.411764705882353e-06,
      "loss": 0.6776,
      "step": 64
    },
    {
      "epoch": 9.142857142857142,
      "eval_loss": 0.6752371788024902,
      "eval_matthews_correlation": 0.05913439236313106,
      "eval_runtime": 2.2214,
      "eval_samples_per_second": 99.486,
      "eval_steps_per_second": 1.801,
      "step": 64
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 7.995858669281006,
      "learning_rate": 3.6764705882352942e-06,
      "loss": 0.6725,
      "step": 65
    },
    {
      "epoch": 9.285714285714286,
      "eval_loss": 0.6751791834831238,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.224,
      "eval_samples_per_second": 99.37,
      "eval_steps_per_second": 1.799,
      "step": 65
    },
    {
      "epoch": 9.428571428571429,
      "grad_norm": 1.673750638961792,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 0.7058,
      "step": 66
    },
    {
      "epoch": 9.428571428571429,
      "eval_loss": 0.675339937210083,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.2212,
      "eval_samples_per_second": 99.496,
      "eval_steps_per_second": 1.801,
      "step": 66
    },
    {
      "epoch": 9.571428571428571,
      "grad_norm": 7.3099846839904785,
      "learning_rate": 2.2058823529411767e-06,
      "loss": 0.7259,
      "step": 67
    },
    {
      "epoch": 9.571428571428571,
      "eval_loss": 0.6756486892700195,
      "eval_matthews_correlation": 0.05913439236313106,
      "eval_runtime": 2.2254,
      "eval_samples_per_second": 99.306,
      "eval_steps_per_second": 1.797,
      "step": 67
    },
    {
      "epoch": 9.714285714285714,
      "grad_norm": 7.496833324432373,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 0.6749,
      "step": 68
    },
    {
      "epoch": 9.714285714285714,
      "eval_loss": 0.6755614280700684,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.226,
      "eval_samples_per_second": 99.28,
      "eval_steps_per_second": 1.797,
      "step": 68
    },
    {
      "epoch": 9.857142857142858,
      "grad_norm": 3.219705820083618,
      "learning_rate": 7.352941176470589e-07,
      "loss": 0.6631,
      "step": 69
    },
    {
      "epoch": 9.857142857142858,
      "eval_loss": 0.675048828125,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.2259,
      "eval_samples_per_second": 99.284,
      "eval_steps_per_second": 1.797,
      "step": 69
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.607999563217163,
      "learning_rate": 0.0,
      "loss": 0.693,
      "step": 70
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.6753774881362915,
      "eval_matthews_correlation": 0.07355872296017515,
      "eval_runtime": 2.1704,
      "eval_samples_per_second": 101.827,
      "eval_steps_per_second": 1.843,
      "step": 70
    },
    {
      "epoch": 10.0,
      "step": 70,
      "total_flos": 96283913355264.0,
      "train_loss": 0.7061511448451451,
      "train_runtime": 450.3922,
      "train_samples_per_second": 19.605,
      "train_steps_per_second": 0.155
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 96283913355264.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}