{
"best_metric": 0.6274509803921569,
"best_model_checkpoint": "/users/hailey/code/03_code/SBD_ai/videomae-base-finetuned-ucf101-subset-SBDtoy/checkpoint-492",
"epoch": 29.009166666666665,
"eval_steps": 500,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 8.483016014099121,
"learning_rate": 4.166666666666667e-06,
"loss": 0.8897,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 10.62829303741455,
"learning_rate": 8.333333333333334e-06,
"loss": 0.7501,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 10.224696159362793,
"learning_rate": 1.25e-05,
"loss": 0.6748,
"step": 30
},
{
"epoch": 0.03,
"grad_norm": 5.6519365310668945,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.7634,
"step": 40
},
{
"epoch": 0.03,
"eval_accuracy": 0.5882352941176471,
"eval_loss": 0.6320443153381348,
"eval_runtime": 137.732,
"eval_samples_per_second": 0.37,
"eval_steps_per_second": 0.189,
"step": 41
},
{
"epoch": 1.01,
"grad_norm": 8.263680458068848,
"learning_rate": 2.0833333333333336e-05,
"loss": 0.7384,
"step": 50
},
{
"epoch": 1.02,
"grad_norm": 9.806657791137695,
"learning_rate": 2.5e-05,
"loss": 0.6558,
"step": 60
},
{
"epoch": 1.02,
"grad_norm": 4.578342437744141,
"learning_rate": 2.916666666666667e-05,
"loss": 0.569,
"step": 70
},
{
"epoch": 1.03,
"grad_norm": 1.7302507162094116,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.7445,
"step": 80
},
{
"epoch": 1.03,
"eval_accuracy": 0.5686274509803921,
"eval_loss": 1.4197746515274048,
"eval_runtime": 132.2118,
"eval_samples_per_second": 0.386,
"eval_steps_per_second": 0.197,
"step": 82
},
{
"epoch": 2.01,
"grad_norm": 29.237693786621094,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.7204,
"step": 90
},
{
"epoch": 2.02,
"grad_norm": 3.5326147079467773,
"learning_rate": 4.166666666666667e-05,
"loss": 1.7924,
"step": 100
},
{
"epoch": 2.02,
"grad_norm": 10.499820709228516,
"learning_rate": 4.5833333333333334e-05,
"loss": 0.6213,
"step": 110
},
{
"epoch": 2.03,
"grad_norm": 6.973377227783203,
"learning_rate": 5e-05,
"loss": 0.4931,
"step": 120
},
{
"epoch": 2.03,
"eval_accuracy": 0.5686274509803921,
"eval_loss": 1.3688796758651733,
"eval_runtime": 132.0446,
"eval_samples_per_second": 0.386,
"eval_steps_per_second": 0.197,
"step": 123
},
{
"epoch": 3.01,
"grad_norm": 24.18391990661621,
"learning_rate": 4.9537037037037035e-05,
"loss": 0.6786,
"step": 130
},
{
"epoch": 3.01,
"grad_norm": 19.578279495239258,
"learning_rate": 4.9074074074074075e-05,
"loss": 0.597,
"step": 140
},
{
"epoch": 3.02,
"grad_norm": 0.4028966724872589,
"learning_rate": 4.8611111111111115e-05,
"loss": 0.7246,
"step": 150
},
{
"epoch": 3.03,
"grad_norm": 29.119096755981445,
"learning_rate": 4.814814814814815e-05,
"loss": 0.7206,
"step": 160
},
{
"epoch": 3.03,
"eval_accuracy": 0.5882352941176471,
"eval_loss": 1.328170657157898,
"eval_runtime": 134.48,
"eval_samples_per_second": 0.379,
"eval_steps_per_second": 0.193,
"step": 164
},
{
"epoch": 4.0,
"grad_norm": 4.413028717041016,
"learning_rate": 4.768518518518519e-05,
"loss": 0.6192,
"step": 170
},
{
"epoch": 4.01,
"grad_norm": 0.44842419028282166,
"learning_rate": 4.722222222222222e-05,
"loss": 0.3903,
"step": 180
},
{
"epoch": 4.02,
"grad_norm": 9.306065559387207,
"learning_rate": 4.675925925925926e-05,
"loss": 0.7218,
"step": 190
},
{
"epoch": 4.03,
"grad_norm": 52.77680587768555,
"learning_rate": 4.62962962962963e-05,
"loss": 0.735,
"step": 200
},
{
"epoch": 4.03,
"eval_accuracy": 0.5686274509803921,
"eval_loss": 2.0849499702453613,
"eval_runtime": 132.8084,
"eval_samples_per_second": 0.384,
"eval_steps_per_second": 0.196,
"step": 205
},
{
"epoch": 5.0,
"grad_norm": 20.23072624206543,
"learning_rate": 4.5833333333333334e-05,
"loss": 0.9199,
"step": 210
},
{
"epoch": 5.01,
"grad_norm": 3.353752613067627,
"learning_rate": 4.5370370370370374e-05,
"loss": 0.4171,
"step": 220
},
{
"epoch": 5.02,
"grad_norm": 0.07062011957168579,
"learning_rate": 4.490740740740741e-05,
"loss": 0.1716,
"step": 230
},
{
"epoch": 5.03,
"grad_norm": 17.275123596191406,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.769,
"step": 240
},
{
"epoch": 5.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.0332462787628174,
"eval_runtime": 141.0066,
"eval_samples_per_second": 0.362,
"eval_steps_per_second": 0.184,
"step": 246
},
{
"epoch": 6.0,
"grad_norm": 1.1296720504760742,
"learning_rate": 4.3981481481481486e-05,
"loss": 0.4991,
"step": 250
},
{
"epoch": 6.01,
"grad_norm": 0.18362000584602356,
"learning_rate": 4.351851851851852e-05,
"loss": 0.4565,
"step": 260
},
{
"epoch": 6.02,
"grad_norm": 9.544737815856934,
"learning_rate": 4.305555555555556e-05,
"loss": 1.5794,
"step": 270
},
{
"epoch": 6.03,
"grad_norm": 9.105454444885254,
"learning_rate": 4.259259259259259e-05,
"loss": 0.9494,
"step": 280
},
{
"epoch": 6.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.130749464035034,
"eval_runtime": 147.5482,
"eval_samples_per_second": 0.346,
"eval_steps_per_second": 0.176,
"step": 287
},
{
"epoch": 7.0,
"grad_norm": 1.585872769355774,
"learning_rate": 4.212962962962963e-05,
"loss": 0.3491,
"step": 290
},
{
"epoch": 7.01,
"grad_norm": 6.12731409072876,
"learning_rate": 4.166666666666667e-05,
"loss": 0.5312,
"step": 300
},
{
"epoch": 7.02,
"grad_norm": 20.4097957611084,
"learning_rate": 4.1203703703703705e-05,
"loss": 0.6494,
"step": 310
},
{
"epoch": 7.03,
"grad_norm": 1.075332522392273,
"learning_rate": 4.074074074074074e-05,
"loss": 0.3857,
"step": 320
},
{
"epoch": 7.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.598536252975464,
"eval_runtime": 141.9381,
"eval_samples_per_second": 0.359,
"eval_steps_per_second": 0.183,
"step": 328
},
{
"epoch": 8.0,
"grad_norm": 0.12138959765434265,
"learning_rate": 4.027777777777778e-05,
"loss": 0.8439,
"step": 330
},
{
"epoch": 8.01,
"grad_norm": 68.02975463867188,
"learning_rate": 3.981481481481482e-05,
"loss": 0.8624,
"step": 340
},
{
"epoch": 8.02,
"grad_norm": 0.4764052927494049,
"learning_rate": 3.935185185185186e-05,
"loss": 0.3225,
"step": 350
},
{
"epoch": 8.03,
"grad_norm": 1.0622520446777344,
"learning_rate": 3.888888888888889e-05,
"loss": 0.3639,
"step": 360
},
{
"epoch": 8.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.2299933433532715,
"eval_runtime": 147.3983,
"eval_samples_per_second": 0.346,
"eval_steps_per_second": 0.176,
"step": 369
},
{
"epoch": 9.0,
"grad_norm": 24.868986129760742,
"learning_rate": 3.8425925925925924e-05,
"loss": 0.6411,
"step": 370
},
{
"epoch": 9.01,
"grad_norm": 0.08051346242427826,
"learning_rate": 3.7962962962962964e-05,
"loss": 0.2678,
"step": 380
},
{
"epoch": 9.02,
"grad_norm": 14.524049758911133,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.1313,
"step": 390
},
{
"epoch": 9.03,
"grad_norm": 5.631989479064941,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.7205,
"step": 400
},
{
"epoch": 9.03,
"grad_norm": 0.37318071722984314,
"learning_rate": 3.6574074074074076e-05,
"loss": 0.6456,
"step": 410
},
{
"epoch": 9.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.8992350101470947,
"eval_runtime": 135.9679,
"eval_samples_per_second": 0.375,
"eval_steps_per_second": 0.191,
"step": 410
},
{
"epoch": 10.01,
"grad_norm": 0.06262947618961334,
"learning_rate": 3.611111111111111e-05,
"loss": 0.331,
"step": 420
},
{
"epoch": 10.02,
"grad_norm": 0.16807079315185547,
"learning_rate": 3.564814814814815e-05,
"loss": 0.4851,
"step": 430
},
{
"epoch": 10.03,
"grad_norm": 0.23209631443023682,
"learning_rate": 3.518518518518519e-05,
"loss": 0.6103,
"step": 440
},
{
"epoch": 10.03,
"grad_norm": 0.20488207042217255,
"learning_rate": 3.472222222222222e-05,
"loss": 0.9483,
"step": 450
},
{
"epoch": 10.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.955637812614441,
"eval_runtime": 130.9716,
"eval_samples_per_second": 0.389,
"eval_steps_per_second": 0.199,
"step": 451
},
{
"epoch": 11.01,
"grad_norm": 42.45355987548828,
"learning_rate": 3.425925925925926e-05,
"loss": 0.3743,
"step": 460
},
{
"epoch": 11.02,
"grad_norm": 0.16700521111488342,
"learning_rate": 3.3796296296296295e-05,
"loss": 0.368,
"step": 470
},
{
"epoch": 11.02,
"grad_norm": 1.189526915550232,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.8548,
"step": 480
},
{
"epoch": 11.03,
"grad_norm": 23.483234405517578,
"learning_rate": 3.2870370370370375e-05,
"loss": 0.4518,
"step": 490
},
{
"epoch": 11.03,
"eval_accuracy": 0.6274509803921569,
"eval_loss": 1.834560751914978,
"eval_runtime": 139.8695,
"eval_samples_per_second": 0.365,
"eval_steps_per_second": 0.186,
"step": 492
},
{
"epoch": 12.01,
"grad_norm": 14.332037925720215,
"learning_rate": 3.240740740740741e-05,
"loss": 0.3553,
"step": 500
},
{
"epoch": 12.02,
"grad_norm": 0.2652936279773712,
"learning_rate": 3.194444444444444e-05,
"loss": 0.3726,
"step": 510
},
{
"epoch": 12.02,
"grad_norm": 0.1738712340593338,
"learning_rate": 3.148148148148148e-05,
"loss": 0.4627,
"step": 520
},
{
"epoch": 12.03,
"grad_norm": 16.079055786132812,
"learning_rate": 3.101851851851852e-05,
"loss": 0.9109,
"step": 530
},
{
"epoch": 12.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.993680477142334,
"eval_runtime": 138.9405,
"eval_samples_per_second": 0.367,
"eval_steps_per_second": 0.187,
"step": 533
},
{
"epoch": 13.01,
"grad_norm": 0.2999970018863678,
"learning_rate": 3.055555555555556e-05,
"loss": 0.1598,
"step": 540
},
{
"epoch": 13.01,
"grad_norm": 0.08949922025203705,
"learning_rate": 3.0092592592592593e-05,
"loss": 0.0925,
"step": 550
},
{
"epoch": 13.02,
"grad_norm": 13.087860107421875,
"learning_rate": 2.962962962962963e-05,
"loss": 0.8925,
"step": 560
},
{
"epoch": 13.03,
"grad_norm": 12.626638412475586,
"learning_rate": 2.916666666666667e-05,
"loss": 0.5441,
"step": 570
},
{
"epoch": 13.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.401322364807129,
"eval_runtime": 140.7378,
"eval_samples_per_second": 0.362,
"eval_steps_per_second": 0.185,
"step": 574
},
{
"epoch": 14.01,
"grad_norm": 0.09884772449731827,
"learning_rate": 2.8703703703703706e-05,
"loss": 0.2266,
"step": 580
},
{
"epoch": 14.01,
"grad_norm": 0.16622687876224518,
"learning_rate": 2.824074074074074e-05,
"loss": 0.0233,
"step": 590
},
{
"epoch": 14.02,
"grad_norm": 0.08121990412473679,
"learning_rate": 2.777777777777778e-05,
"loss": 0.4697,
"step": 600
},
{
"epoch": 14.03,
"grad_norm": 0.04321848973631859,
"learning_rate": 2.7314814814814816e-05,
"loss": 0.4035,
"step": 610
},
{
"epoch": 14.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 1.9077707529067993,
"eval_runtime": 140.6526,
"eval_samples_per_second": 0.363,
"eval_steps_per_second": 0.185,
"step": 615
},
{
"epoch": 15.0,
"grad_norm": 0.08672894537448883,
"learning_rate": 2.6851851851851855e-05,
"loss": 0.7602,
"step": 620
},
{
"epoch": 15.01,
"grad_norm": 0.03848231956362724,
"learning_rate": 2.6388888888888892e-05,
"loss": 0.0041,
"step": 630
},
{
"epoch": 15.02,
"grad_norm": 0.09817267954349518,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.7232,
"step": 640
},
{
"epoch": 15.03,
"grad_norm": 2.0446386337280273,
"learning_rate": 2.5462962962962965e-05,
"loss": 0.1713,
"step": 650
},
{
"epoch": 15.03,
"eval_accuracy": 0.5882352941176471,
"eval_loss": 2.080268383026123,
"eval_runtime": 140.5712,
"eval_samples_per_second": 0.363,
"eval_steps_per_second": 0.185,
"step": 656
},
{
"epoch": 16.0,
"grad_norm": 13.678278923034668,
"learning_rate": 2.5e-05,
"loss": 0.4517,
"step": 660
},
{
"epoch": 16.01,
"grad_norm": 6.980419635772705,
"learning_rate": 2.4537037037037038e-05,
"loss": 0.0059,
"step": 670
},
{
"epoch": 16.02,
"grad_norm": 0.3049313724040985,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.0051,
"step": 680
},
{
"epoch": 16.03,
"grad_norm": 0.06310291588306427,
"learning_rate": 2.361111111111111e-05,
"loss": 0.0542,
"step": 690
},
{
"epoch": 16.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.543215036392212,
"eval_runtime": 139.4428,
"eval_samples_per_second": 0.366,
"eval_steps_per_second": 0.186,
"step": 697
},
{
"epoch": 17.0,
"grad_norm": 0.047225985676050186,
"learning_rate": 2.314814814814815e-05,
"loss": 0.4816,
"step": 700
},
{
"epoch": 17.01,
"grad_norm": 0.025472501292824745,
"learning_rate": 2.2685185185185187e-05,
"loss": 0.2573,
"step": 710
},
{
"epoch": 17.02,
"grad_norm": 0.1224554181098938,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.0025,
"step": 720
},
{
"epoch": 17.03,
"grad_norm": 0.01003193762153387,
"learning_rate": 2.175925925925926e-05,
"loss": 0.3084,
"step": 730
},
{
"epoch": 17.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.57525372505188,
"eval_runtime": 138.9495,
"eval_samples_per_second": 0.367,
"eval_steps_per_second": 0.187,
"step": 738
},
{
"epoch": 18.0,
"grad_norm": 1.1518467664718628,
"learning_rate": 2.1296296296296296e-05,
"loss": 0.242,
"step": 740
},
{
"epoch": 18.01,
"grad_norm": 0.033695243299007416,
"learning_rate": 2.0833333333333336e-05,
"loss": 0.2333,
"step": 750
},
{
"epoch": 18.02,
"grad_norm": 0.06799116730690002,
"learning_rate": 2.037037037037037e-05,
"loss": 0.0012,
"step": 760
},
{
"epoch": 18.03,
"grad_norm": 0.00848899595439434,
"learning_rate": 1.990740740740741e-05,
"loss": 0.2476,
"step": 770
},
{
"epoch": 18.03,
"eval_accuracy": 0.6274509803921569,
"eval_loss": 2.4252748489379883,
"eval_runtime": 140.2598,
"eval_samples_per_second": 0.364,
"eval_steps_per_second": 0.185,
"step": 779
},
{
"epoch": 19.0,
"grad_norm": 0.015138844028115273,
"learning_rate": 1.9444444444444445e-05,
"loss": 0.3527,
"step": 780
},
{
"epoch": 19.01,
"grad_norm": 0.008504153229296207,
"learning_rate": 1.8981481481481482e-05,
"loss": 0.0087,
"step": 790
},
{
"epoch": 19.02,
"grad_norm": 140.87295532226562,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.2821,
"step": 800
},
{
"epoch": 19.03,
"grad_norm": 0.027083896100521088,
"learning_rate": 1.8055555555555555e-05,
"loss": 0.404,
"step": 810
},
{
"epoch": 19.03,
"grad_norm": 0.15064358711242676,
"learning_rate": 1.7592592592592595e-05,
"loss": 0.006,
"step": 820
},
{
"epoch": 19.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.4320342540740967,
"eval_runtime": 136.4673,
"eval_samples_per_second": 0.374,
"eval_steps_per_second": 0.191,
"step": 820
},
{
"epoch": 20.01,
"grad_norm": 0.05090714618563652,
"learning_rate": 1.712962962962963e-05,
"loss": 0.1214,
"step": 830
},
{
"epoch": 20.02,
"grad_norm": 0.05721502751111984,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.3519,
"step": 840
},
{
"epoch": 20.02,
"grad_norm": 0.025916827842593193,
"learning_rate": 1.6203703703703704e-05,
"loss": 0.0014,
"step": 850
},
{
"epoch": 20.03,
"grad_norm": 0.04987427219748497,
"learning_rate": 1.574074074074074e-05,
"loss": 0.0021,
"step": 860
},
{
"epoch": 20.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.718170166015625,
"eval_runtime": 143.3116,
"eval_samples_per_second": 0.356,
"eval_steps_per_second": 0.181,
"step": 861
},
{
"epoch": 21.01,
"grad_norm": 0.009684134274721146,
"learning_rate": 1.527777777777778e-05,
"loss": 0.22,
"step": 870
},
{
"epoch": 21.02,
"grad_norm": 20.616426467895508,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.2033,
"step": 880
},
{
"epoch": 21.02,
"grad_norm": 0.028017116710543633,
"learning_rate": 1.4351851851851853e-05,
"loss": 0.4944,
"step": 890
},
{
"epoch": 21.03,
"grad_norm": 0.04853179305791855,
"learning_rate": 1.388888888888889e-05,
"loss": 0.008,
"step": 900
},
{
"epoch": 21.03,
"eval_accuracy": 0.5882352941176471,
"eval_loss": 2.771120548248291,
"eval_runtime": 141.504,
"eval_samples_per_second": 0.36,
"eval_steps_per_second": 0.184,
"step": 902
},
{
"epoch": 22.01,
"grad_norm": 0.010195921175181866,
"learning_rate": 1.3425925925925928e-05,
"loss": 0.2491,
"step": 910
},
{
"epoch": 22.02,
"grad_norm": 0.058783795684576035,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.1002,
"step": 920
},
{
"epoch": 22.02,
"grad_norm": 0.009438421577215195,
"learning_rate": 1.25e-05,
"loss": 0.2846,
"step": 930
},
{
"epoch": 22.03,
"grad_norm": 0.017627988010644913,
"learning_rate": 1.2037037037037037e-05,
"loss": 0.0011,
"step": 940
},
{
"epoch": 22.03,
"eval_accuracy": 0.5882352941176471,
"eval_loss": 2.808924674987793,
"eval_runtime": 140.1443,
"eval_samples_per_second": 0.364,
"eval_steps_per_second": 0.186,
"step": 943
},
{
"epoch": 23.01,
"grad_norm": 0.009988226927816868,
"learning_rate": 1.1574074074074075e-05,
"loss": 0.0157,
"step": 950
},
{
"epoch": 23.01,
"grad_norm": 0.02875751443207264,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.0036,
"step": 960
},
{
"epoch": 23.02,
"grad_norm": 0.5501790046691895,
"learning_rate": 1.0648148148148148e-05,
"loss": 0.0029,
"step": 970
},
{
"epoch": 23.03,
"grad_norm": 0.013142755255103111,
"learning_rate": 1.0185185185185185e-05,
"loss": 0.7546,
"step": 980
},
{
"epoch": 23.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8142335414886475,
"eval_runtime": 147.8203,
"eval_samples_per_second": 0.345,
"eval_steps_per_second": 0.176,
"step": 984
},
{
"epoch": 24.0,
"grad_norm": 1.282440423965454,
"learning_rate": 9.722222222222223e-06,
"loss": 0.0821,
"step": 990
},
{
"epoch": 24.01,
"grad_norm": 0.011100966483354568,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0014,
"step": 1000
},
{
"epoch": 24.02,
"grad_norm": 0.020793117582798004,
"learning_rate": 8.796296296296297e-06,
"loss": 0.1571,
"step": 1010
},
{
"epoch": 24.03,
"grad_norm": 0.02288602478802204,
"learning_rate": 8.333333333333334e-06,
"loss": 0.0006,
"step": 1020
},
{
"epoch": 24.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8796093463897705,
"eval_runtime": 142.281,
"eval_samples_per_second": 0.358,
"eval_steps_per_second": 0.183,
"step": 1025
},
{
"epoch": 25.0,
"grad_norm": 0.031101850792765617,
"learning_rate": 7.87037037037037e-06,
"loss": 0.0051,
"step": 1030
},
{
"epoch": 25.01,
"grad_norm": 0.017316602170467377,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.0006,
"step": 1040
},
{
"epoch": 25.02,
"grad_norm": 0.015025320462882519,
"learning_rate": 6.944444444444445e-06,
"loss": 0.2444,
"step": 1050
},
{
"epoch": 25.03,
"grad_norm": 0.014637992717325687,
"learning_rate": 6.481481481481481e-06,
"loss": 0.0008,
"step": 1060
},
{
"epoch": 25.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8485913276672363,
"eval_runtime": 140.4219,
"eval_samples_per_second": 0.363,
"eval_steps_per_second": 0.185,
"step": 1066
},
{
"epoch": 26.0,
"grad_norm": 0.1402996927499771,
"learning_rate": 6.0185185185185185e-06,
"loss": 0.1973,
"step": 1070
},
{
"epoch": 26.01,
"grad_norm": 0.006944969296455383,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0005,
"step": 1080
},
{
"epoch": 26.02,
"grad_norm": 18.494768142700195,
"learning_rate": 5.092592592592592e-06,
"loss": 0.1727,
"step": 1090
},
{
"epoch": 26.03,
"grad_norm": 0.010701355524361134,
"learning_rate": 4.6296296296296296e-06,
"loss": 0.1745,
"step": 1100
},
{
"epoch": 26.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8475284576416016,
"eval_runtime": 143.4719,
"eval_samples_per_second": 0.355,
"eval_steps_per_second": 0.181,
"step": 1107
},
{
"epoch": 27.0,
"grad_norm": 0.012416614219546318,
"learning_rate": 4.166666666666667e-06,
"loss": 0.0074,
"step": 1110
},
{
"epoch": 27.01,
"grad_norm": 0.06672808527946472,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.0006,
"step": 1120
},
{
"epoch": 27.02,
"grad_norm": 0.008200524374842644,
"learning_rate": 3.2407407407407406e-06,
"loss": 0.0006,
"step": 1130
},
{
"epoch": 27.03,
"grad_norm": 14.067534446716309,
"learning_rate": 2.777777777777778e-06,
"loss": 0.5421,
"step": 1140
},
{
"epoch": 27.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8462321758270264,
"eval_runtime": 137.4454,
"eval_samples_per_second": 0.371,
"eval_steps_per_second": 0.189,
"step": 1148
},
{
"epoch": 28.0,
"grad_norm": 0.009883771650493145,
"learning_rate": 2.3148148148148148e-06,
"loss": 0.0889,
"step": 1150
},
{
"epoch": 28.01,
"grad_norm": 0.007996085099875927,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.1251,
"step": 1160
},
{
"epoch": 28.02,
"grad_norm": 0.008312738500535488,
"learning_rate": 1.388888888888889e-06,
"loss": 0.339,
"step": 1170
},
{
"epoch": 28.03,
"grad_norm": 0.07012984901666641,
"learning_rate": 9.259259259259259e-07,
"loss": 0.1233,
"step": 1180
},
{
"epoch": 28.03,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.8324010372161865,
"eval_runtime": 132.7093,
"eval_samples_per_second": 0.384,
"eval_steps_per_second": 0.196,
"step": 1189
},
{
"epoch": 29.0,
"grad_norm": 0.029733408242464066,
"learning_rate": 4.6296296296296297e-07,
"loss": 0.0054,
"step": 1190
},
{
"epoch": 29.01,
"grad_norm": 0.033483583480119705,
"learning_rate": 0.0,
"loss": 0.1298,
"step": 1200
},
{
"epoch": 29.01,
"eval_accuracy": 0.6078431372549019,
"eval_loss": 2.833205461502075,
"eval_runtime": 130.982,
"eval_samples_per_second": 0.389,
"eval_steps_per_second": 0.199,
"step": 1200
},
{
"epoch": 29.01,
"step": 1200,
"total_flos": 2.954419141637505e+18,
"train_loss": 0.40525790957190716,
"train_runtime": 10653.7383,
"train_samples_per_second": 0.225,
"train_steps_per_second": 0.113
},
{
"epoch": 29.01,
"eval_accuracy": 0.6274509803921569,
"eval_loss": 1.834560751914978,
"eval_runtime": 132.9753,
"eval_samples_per_second": 0.384,
"eval_steps_per_second": 0.196,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 1200,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 2.954419141637505e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}