v1_mistral_lora / last-checkpoint / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07654037504783773,
"eval_steps": 20,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.648014440433213,
"eval_f1": 0.5578231292517006,
"eval_loss": 0.6320980787277222,
"eval_precision": 0.5125,
"eval_recall": 0.6119402985074627,
"eval_runtime": 43.0693,
"eval_samples_per_second": 6.989,
"eval_steps_per_second": 0.232,
"step": 0
},
{
"epoch": 0.0007654037504783774,
"grad_norm": 2.9013254642486572,
"learning_rate": 1.5267175572519085e-07,
"loss": 0.7916,
"step": 1
},
{
"epoch": 0.0015308075009567547,
"grad_norm": 2.805720567703247,
"learning_rate": 3.053435114503817e-07,
"loss": 0.7752,
"step": 2
},
{
"epoch": 0.002296211251435132,
"grad_norm": 2.7805535793304443,
"learning_rate": 4.5801526717557257e-07,
"loss": 0.7902,
"step": 3
},
{
"epoch": 0.0030616150019135095,
"grad_norm": 3.5079779624938965,
"learning_rate": 6.106870229007634e-07,
"loss": 0.7335,
"step": 4
},
{
"epoch": 0.003827018752391887,
"grad_norm": 2.8343734741210938,
"learning_rate": 7.633587786259543e-07,
"loss": 0.7026,
"step": 5
},
{
"epoch": 0.004592422502870264,
"grad_norm": 3.3417398929595947,
"learning_rate": 9.160305343511451e-07,
"loss": 0.7486,
"step": 6
},
{
"epoch": 0.005357826253348641,
"grad_norm": 3.5592422485351562,
"learning_rate": 1.068702290076336e-06,
"loss": 0.7085,
"step": 7
},
{
"epoch": 0.006123230003827019,
"grad_norm": 4.005152702331543,
"learning_rate": 1.2213740458015268e-06,
"loss": 0.7904,
"step": 8
},
{
"epoch": 0.006888633754305396,
"grad_norm": 3.1045947074890137,
"learning_rate": 1.3740458015267178e-06,
"loss": 0.7956,
"step": 9
},
{
"epoch": 0.007654037504783774,
"grad_norm": 2.570974826812744,
"learning_rate": 1.5267175572519086e-06,
"loss": 0.7206,
"step": 10
},
{
"epoch": 0.008419441255262151,
"grad_norm": 2.293649911880493,
"learning_rate": 1.6793893129770995e-06,
"loss": 0.6451,
"step": 11
},
{
"epoch": 0.009184845005740528,
"grad_norm": 2.148493528366089,
"learning_rate": 1.8320610687022903e-06,
"loss": 0.5701,
"step": 12
},
{
"epoch": 0.009950248756218905,
"grad_norm": 3.934507131576538,
"learning_rate": 1.984732824427481e-06,
"loss": 0.7146,
"step": 13
},
{
"epoch": 0.010715652506697282,
"grad_norm": 2.8026010990142822,
"learning_rate": 2.137404580152672e-06,
"loss": 0.7812,
"step": 14
},
{
"epoch": 0.011481056257175661,
"grad_norm": 2.6908481121063232,
"learning_rate": 2.2900763358778625e-06,
"loss": 0.6653,
"step": 15
},
{
"epoch": 0.012246460007654038,
"grad_norm": 2.2652783393859863,
"learning_rate": 2.4427480916030536e-06,
"loss": 0.6606,
"step": 16
},
{
"epoch": 0.013011863758132415,
"grad_norm": 3.234048843383789,
"learning_rate": 2.595419847328244e-06,
"loss": 0.7316,
"step": 17
},
{
"epoch": 0.013777267508610792,
"grad_norm": 2.967696189880371,
"learning_rate": 2.7480916030534356e-06,
"loss": 0.6979,
"step": 18
},
{
"epoch": 0.01454267125908917,
"grad_norm": 3.8475921154022217,
"learning_rate": 2.900763358778626e-06,
"loss": 0.8055,
"step": 19
},
{
"epoch": 0.015308075009567547,
"grad_norm": 2.42297101020813,
"learning_rate": 3.0534351145038173e-06,
"loss": 0.6598,
"step": 20
},
{
"epoch": 0.015308075009567547,
"eval_accuracy": 0.6552346570397112,
"eval_f1": 0.5526932084309133,
"eval_loss": 0.6283615231513977,
"eval_precision": 0.5221238938053098,
"eval_recall": 0.5870646766169154,
"eval_runtime": 43.704,
"eval_samples_per_second": 6.887,
"eval_steps_per_second": 0.229,
"step": 20
},
{
"epoch": 0.016073478760045924,
"grad_norm": 2.063398838043213,
"learning_rate": 3.206106870229008e-06,
"loss": 0.6512,
"step": 21
},
{
"epoch": 0.016838882510524303,
"grad_norm": 2.4448840618133545,
"learning_rate": 3.358778625954199e-06,
"loss": 0.567,
"step": 22
},
{
"epoch": 0.017604286261002678,
"grad_norm": 3.497593641281128,
"learning_rate": 3.5114503816793895e-06,
"loss": 0.7674,
"step": 23
},
{
"epoch": 0.018369690011481057,
"grad_norm": 1.899181842803955,
"learning_rate": 3.6641221374045806e-06,
"loss": 0.6927,
"step": 24
},
{
"epoch": 0.019135093761959432,
"grad_norm": 1.9708737134933472,
"learning_rate": 3.816793893129772e-06,
"loss": 0.7301,
"step": 25
},
{
"epoch": 0.01990049751243781,
"grad_norm": 2.531264543533325,
"learning_rate": 3.969465648854962e-06,
"loss": 0.7908,
"step": 26
},
{
"epoch": 0.02066590126291619,
"grad_norm": 3.4307644367218018,
"learning_rate": 4.122137404580153e-06,
"loss": 0.7273,
"step": 27
},
{
"epoch": 0.021431305013394564,
"grad_norm": 2.6872708797454834,
"learning_rate": 4.274809160305344e-06,
"loss": 0.7336,
"step": 28
},
{
"epoch": 0.022196708763872943,
"grad_norm": 3.0511341094970703,
"learning_rate": 4.427480916030535e-06,
"loss": 0.6708,
"step": 29
},
{
"epoch": 0.022962112514351322,
"grad_norm": 2.479977607727051,
"learning_rate": 4.580152671755725e-06,
"loss": 0.7374,
"step": 30
},
{
"epoch": 0.023727516264829697,
"grad_norm": 3.206690549850464,
"learning_rate": 4.732824427480917e-06,
"loss": 0.7716,
"step": 31
},
{
"epoch": 0.024492920015308076,
"grad_norm": 2.561702013015747,
"learning_rate": 4.885496183206107e-06,
"loss": 0.8201,
"step": 32
},
{
"epoch": 0.02525832376578645,
"grad_norm": 2.986666679382324,
"learning_rate": 5.038167938931297e-06,
"loss": 0.7439,
"step": 33
},
{
"epoch": 0.02602372751626483,
"grad_norm": 1.9290986061096191,
"learning_rate": 5.190839694656488e-06,
"loss": 0.6164,
"step": 34
},
{
"epoch": 0.026789131266743208,
"grad_norm": 2.402761459350586,
"learning_rate": 5.34351145038168e-06,
"loss": 0.7119,
"step": 35
},
{
"epoch": 0.027554535017221583,
"grad_norm": 2.68208384513855,
"learning_rate": 5.496183206106871e-06,
"loss": 0.6692,
"step": 36
},
{
"epoch": 0.028319938767699962,
"grad_norm": 2.0670857429504395,
"learning_rate": 5.648854961832062e-06,
"loss": 0.6724,
"step": 37
},
{
"epoch": 0.02908534251817834,
"grad_norm": 2.9470982551574707,
"learning_rate": 5.801526717557252e-06,
"loss": 0.66,
"step": 38
},
{
"epoch": 0.029850746268656716,
"grad_norm": 2.172914981842041,
"learning_rate": 5.9541984732824435e-06,
"loss": 0.6567,
"step": 39
},
{
"epoch": 0.030616150019135095,
"grad_norm": 2.658773183822632,
"learning_rate": 6.1068702290076346e-06,
"loss": 0.6948,
"step": 40
},
{
"epoch": 0.030616150019135095,
"eval_accuracy": 0.6787003610108303,
"eval_f1": 0.5,
"eval_loss": 0.6222203373908997,
"eval_precision": 0.5741935483870968,
"eval_recall": 0.4427860696517413,
"eval_runtime": 43.9963,
"eval_samples_per_second": 6.841,
"eval_steps_per_second": 0.227,
"step": 40
},
{
"epoch": 0.03138155376961347,
"grad_norm": 2.3062996864318848,
"learning_rate": 6.259541984732826e-06,
"loss": 0.7169,
"step": 41
},
{
"epoch": 0.03214695752009185,
"grad_norm": 2.899698495864868,
"learning_rate": 6.412213740458016e-06,
"loss": 0.65,
"step": 42
},
{
"epoch": 0.03291236127057023,
"grad_norm": 2.191978931427002,
"learning_rate": 6.564885496183207e-06,
"loss": 0.6741,
"step": 43
},
{
"epoch": 0.033677765021048606,
"grad_norm": 2.6911063194274902,
"learning_rate": 6.717557251908398e-06,
"loss": 0.652,
"step": 44
},
{
"epoch": 0.03444316877152698,
"grad_norm": 2.832355260848999,
"learning_rate": 6.870229007633589e-06,
"loss": 0.7163,
"step": 45
},
{
"epoch": 0.035208572522005356,
"grad_norm": 2.7549805641174316,
"learning_rate": 7.022900763358779e-06,
"loss": 0.6482,
"step": 46
},
{
"epoch": 0.035973976272483735,
"grad_norm": 2.533820390701294,
"learning_rate": 7.17557251908397e-06,
"loss": 0.6657,
"step": 47
},
{
"epoch": 0.03673938002296211,
"grad_norm": 2.542445182800293,
"learning_rate": 7.328244274809161e-06,
"loss": 0.6803,
"step": 48
},
{
"epoch": 0.03750478377344049,
"grad_norm": 2.254171848297119,
"learning_rate": 7.480916030534352e-06,
"loss": 0.6628,
"step": 49
},
{
"epoch": 0.038270187523918864,
"grad_norm": 2.2992422580718994,
"learning_rate": 7.633587786259543e-06,
"loss": 0.6237,
"step": 50
},
{
"epoch": 0.03903559127439724,
"grad_norm": 2.2923660278320312,
"learning_rate": 7.786259541984733e-06,
"loss": 0.636,
"step": 51
},
{
"epoch": 0.03980099502487562,
"grad_norm": 3.348690986633301,
"learning_rate": 7.938931297709924e-06,
"loss": 0.7033,
"step": 52
},
{
"epoch": 0.040566398775354,
"grad_norm": 2.794326066970825,
"learning_rate": 8.091603053435115e-06,
"loss": 0.7764,
"step": 53
},
{
"epoch": 0.04133180252583238,
"grad_norm": 2.662022352218628,
"learning_rate": 8.244274809160306e-06,
"loss": 0.6787,
"step": 54
},
{
"epoch": 0.04209720627631076,
"grad_norm": 2.623007297515869,
"learning_rate": 8.396946564885497e-06,
"loss": 0.6481,
"step": 55
},
{
"epoch": 0.04286261002678913,
"grad_norm": 2.626187801361084,
"learning_rate": 8.549618320610688e-06,
"loss": 0.5648,
"step": 56
},
{
"epoch": 0.04362801377726751,
"grad_norm": 3.5378401279449463,
"learning_rate": 8.702290076335879e-06,
"loss": 0.7537,
"step": 57
},
{
"epoch": 0.044393417527745886,
"grad_norm": 2.221616744995117,
"learning_rate": 8.85496183206107e-06,
"loss": 0.5946,
"step": 58
},
{
"epoch": 0.045158821278224265,
"grad_norm": 2.4003000259399414,
"learning_rate": 9.007633587786259e-06,
"loss": 0.6146,
"step": 59
},
{
"epoch": 0.045924225028702644,
"grad_norm": 3.179100751876831,
"learning_rate": 9.16030534351145e-06,
"loss": 0.6394,
"step": 60
},
{
"epoch": 0.045924225028702644,
"eval_accuracy": 0.6877256317689531,
"eval_f1": 0.4507936507936508,
"eval_loss": 0.6186545491218567,
"eval_precision": 0.6228070175438597,
"eval_recall": 0.35323383084577115,
"eval_runtime": 43.6865,
"eval_samples_per_second": 6.89,
"eval_steps_per_second": 0.229,
"step": 60
},
{
"epoch": 0.046689628779181015,
"grad_norm": 2.6747021675109863,
"learning_rate": 9.312977099236641e-06,
"loss": 0.6995,
"step": 61
},
{
"epoch": 0.047455032529659394,
"grad_norm": 2.2150821685791016,
"learning_rate": 9.465648854961834e-06,
"loss": 0.6553,
"step": 62
},
{
"epoch": 0.04822043628013777,
"grad_norm": 4.880781173706055,
"learning_rate": 9.618320610687025e-06,
"loss": 0.695,
"step": 63
},
{
"epoch": 0.04898584003061615,
"grad_norm": 2.353868007659912,
"learning_rate": 9.770992366412214e-06,
"loss": 0.6338,
"step": 64
},
{
"epoch": 0.04975124378109453,
"grad_norm": 2.5536489486694336,
"learning_rate": 9.923664122137405e-06,
"loss": 0.6623,
"step": 65
},
{
"epoch": 0.0505166475315729,
"grad_norm": 2.4389688968658447,
"learning_rate": 1.0076335877862595e-05,
"loss": 0.676,
"step": 66
},
{
"epoch": 0.05128205128205128,
"grad_norm": 2.0447139739990234,
"learning_rate": 1.0229007633587786e-05,
"loss": 0.5571,
"step": 67
},
{
"epoch": 0.05204745503252966,
"grad_norm": 2.9738974571228027,
"learning_rate": 1.0381679389312977e-05,
"loss": 0.7705,
"step": 68
},
{
"epoch": 0.05281285878300804,
"grad_norm": 2.7463932037353516,
"learning_rate": 1.0534351145038168e-05,
"loss": 0.6645,
"step": 69
},
{
"epoch": 0.053578262533486416,
"grad_norm": 2.617324113845825,
"learning_rate": 1.068702290076336e-05,
"loss": 0.6211,
"step": 70
},
{
"epoch": 0.05434366628396479,
"grad_norm": 2.629154920578003,
"learning_rate": 1.0839694656488552e-05,
"loss": 0.6189,
"step": 71
},
{
"epoch": 0.05510907003444317,
"grad_norm": 3.0344226360321045,
"learning_rate": 1.0992366412213743e-05,
"loss": 0.7318,
"step": 72
},
{
"epoch": 0.055874473784921545,
"grad_norm": 2.5670666694641113,
"learning_rate": 1.1145038167938934e-05,
"loss": 0.6702,
"step": 73
},
{
"epoch": 0.056639877535399924,
"grad_norm": 2.1741843223571777,
"learning_rate": 1.1297709923664125e-05,
"loss": 0.6686,
"step": 74
},
{
"epoch": 0.0574052812858783,
"grad_norm": 2.724057674407959,
"learning_rate": 1.1450381679389312e-05,
"loss": 0.6252,
"step": 75
},
{
"epoch": 0.05817068503635668,
"grad_norm": 2.225811719894409,
"learning_rate": 1.1603053435114503e-05,
"loss": 0.5888,
"step": 76
},
{
"epoch": 0.05893608878683505,
"grad_norm": 2.8234739303588867,
"learning_rate": 1.1755725190839696e-05,
"loss": 0.5833,
"step": 77
},
{
"epoch": 0.05970149253731343,
"grad_norm": 2.3463521003723145,
"learning_rate": 1.1908396946564887e-05,
"loss": 0.6519,
"step": 78
},
{
"epoch": 0.06046689628779181,
"grad_norm": 2.0764167308807373,
"learning_rate": 1.2061068702290078e-05,
"loss": 0.6022,
"step": 79
},
{
"epoch": 0.06123230003827019,
"grad_norm": 2.889023542404175,
"learning_rate": 1.2213740458015269e-05,
"loss": 0.6466,
"step": 80
},
{
"epoch": 0.06123230003827019,
"eval_accuracy": 0.7148014440433214,
"eval_f1": 0.5752688172043011,
"eval_loss": 0.5946354866027832,
"eval_precision": 0.6257309941520468,
"eval_recall": 0.5323383084577115,
"eval_runtime": 43.854,
"eval_samples_per_second": 6.864,
"eval_steps_per_second": 0.228,
"step": 80
},
{
"epoch": 0.06199770378874857,
"grad_norm": 2.1842238903045654,
"learning_rate": 1.236641221374046e-05,
"loss": 0.6137,
"step": 81
},
{
"epoch": 0.06276310753922694,
"grad_norm": 2.382739543914795,
"learning_rate": 1.2519083969465651e-05,
"loss": 0.5988,
"step": 82
},
{
"epoch": 0.06352851128970533,
"grad_norm": 2.536407470703125,
"learning_rate": 1.2671755725190839e-05,
"loss": 0.6698,
"step": 83
},
{
"epoch": 0.0642939150401837,
"grad_norm": 3.341318130493164,
"learning_rate": 1.2824427480916032e-05,
"loss": 0.632,
"step": 84
},
{
"epoch": 0.06505931879066207,
"grad_norm": 2.605912446975708,
"learning_rate": 1.2977099236641223e-05,
"loss": 0.6817,
"step": 85
},
{
"epoch": 0.06582472254114045,
"grad_norm": 2.7820451259613037,
"learning_rate": 1.3129770992366414e-05,
"loss": 0.5941,
"step": 86
},
{
"epoch": 0.06659012629161883,
"grad_norm": 2.19356632232666,
"learning_rate": 1.3282442748091605e-05,
"loss": 0.5755,
"step": 87
},
{
"epoch": 0.06735553004209721,
"grad_norm": 2.6070029735565186,
"learning_rate": 1.3435114503816796e-05,
"loss": 0.6472,
"step": 88
},
{
"epoch": 0.06812093379257558,
"grad_norm": 3.145352363586426,
"learning_rate": 1.3587786259541987e-05,
"loss": 0.6356,
"step": 89
},
{
"epoch": 0.06888633754305395,
"grad_norm": 2.909482955932617,
"learning_rate": 1.3740458015267178e-05,
"loss": 0.5926,
"step": 90
},
{
"epoch": 0.06965174129353234,
"grad_norm": 2.7917375564575195,
"learning_rate": 1.3893129770992369e-05,
"loss": 0.5445,
"step": 91
},
{
"epoch": 0.07041714504401071,
"grad_norm": 2.8729193210601807,
"learning_rate": 1.4045801526717558e-05,
"loss": 0.6508,
"step": 92
},
{
"epoch": 0.0711825487944891,
"grad_norm": 3.1102027893066406,
"learning_rate": 1.4198473282442749e-05,
"loss": 0.6189,
"step": 93
},
{
"epoch": 0.07194795254496747,
"grad_norm": 3.4606645107269287,
"learning_rate": 1.435114503816794e-05,
"loss": 0.5364,
"step": 94
},
{
"epoch": 0.07271335629544584,
"grad_norm": 2.350576877593994,
"learning_rate": 1.4503816793893131e-05,
"loss": 0.5593,
"step": 95
},
{
"epoch": 0.07347876004592423,
"grad_norm": 3.097646474838257,
"learning_rate": 1.4656488549618322e-05,
"loss": 0.72,
"step": 96
},
{
"epoch": 0.0742441637964026,
"grad_norm": 2.978395938873291,
"learning_rate": 1.4809160305343513e-05,
"loss": 0.648,
"step": 97
},
{
"epoch": 0.07500956754688098,
"grad_norm": 3.397475004196167,
"learning_rate": 1.4961832061068704e-05,
"loss": 0.5215,
"step": 98
},
{
"epoch": 0.07577497129735936,
"grad_norm": 2.8847622871398926,
"learning_rate": 1.5114503816793895e-05,
"loss": 0.5677,
"step": 99
},
{
"epoch": 0.07654037504783773,
"grad_norm": 2.4467875957489014,
"learning_rate": 1.5267175572519086e-05,
"loss": 0.5551,
"step": 100
},
{
"epoch": 0.07654037504783773,
"eval_accuracy": 0.7256317689530686,
"eval_f1": 0.6346153846153846,
"eval_loss": 0.5565891265869141,
"eval_precision": 0.6139534883720931,
"eval_recall": 0.6567164179104478,
"eval_runtime": 44.3839,
"eval_samples_per_second": 6.782,
"eval_steps_per_second": 0.225,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 1306,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.5277223846084608e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}