{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5,
"eval_steps": 2000,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.9697519540786743,
"learning_rate": 1e-06,
"loss": 0.1629,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 1.3848124742507935,
"learning_rate": 9.898989898989898e-07,
"loss": 0.14,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 0.9986572265625,
"learning_rate": 9.797979797979797e-07,
"loss": 0.1354,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 0.11438798904418945,
"learning_rate": 9.696969696969698e-07,
"loss": 0.1182,
"step": 400
},
{
"epoch": 0.03,
"grad_norm": 0.8548241257667542,
"learning_rate": 9.595959595959596e-07,
"loss": 0.1192,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 1.5312464237213135,
"learning_rate": 9.494949494949495e-07,
"loss": 0.0997,
"step": 600
},
{
"epoch": 0.04,
"grad_norm": 0.9692059755325317,
"learning_rate": 9.393939393939395e-07,
"loss": 0.102,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 0.42864611744880676,
"learning_rate": 9.292929292929292e-07,
"loss": 0.0901,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 0.852543830871582,
"learning_rate": 9.191919191919192e-07,
"loss": 0.0937,
"step": 900
},
{
"epoch": 0.05,
"grad_norm": 0.5718303322792053,
"learning_rate": 9.09090909090909e-07,
"loss": 0.093,
"step": 1000
},
{
"epoch": 0.06,
"grad_norm": 0.9396565556526184,
"learning_rate": 8.98989898989899e-07,
"loss": 0.0892,
"step": 1100
},
{
"epoch": 0.06,
"grad_norm": 0.08157779276371002,
"learning_rate": 8.888888888888888e-07,
"loss": 0.0934,
"step": 1200
},
{
"epoch": 0.07,
"grad_norm": 0.8076322078704834,
"learning_rate": 8.787878787878787e-07,
"loss": 0.0725,
"step": 1300
},
{
"epoch": 0.07,
"grad_norm": 1.5076119899749756,
"learning_rate": 8.686868686868687e-07,
"loss": 0.0835,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 1.1567238569259644,
"learning_rate": 8.585858585858586e-07,
"loss": 0.0747,
"step": 1500
},
{
"epoch": 0.08,
"grad_norm": 0.6817927956581116,
"learning_rate": 8.484848484848484e-07,
"loss": 0.0903,
"step": 1600
},
{
"epoch": 0.09,
"grad_norm": 0.6467050313949585,
"learning_rate": 8.383838383838383e-07,
"loss": 0.0721,
"step": 1700
},
{
"epoch": 0.09,
"grad_norm": 1.8435570001602173,
"learning_rate": 8.282828282828283e-07,
"loss": 0.0847,
"step": 1800
},
{
"epoch": 0.1,
"grad_norm": 0.6265794634819031,
"learning_rate": 8.181818181818182e-07,
"loss": 0.0687,
"step": 1900
},
{
"epoch": 0.1,
"grad_norm": 1.360060453414917,
"learning_rate": 8.08080808080808e-07,
"loss": 0.0748,
"step": 2000
},
{
"epoch": 0.1,
"eval_loss": 0.06745574623346329,
"eval_runtime": 304.7718,
"eval_samples_per_second": 3.281,
"eval_steps_per_second": 0.82,
"step": 2000
},
{
"epoch": 0.1,
"grad_norm": 0.5074820518493652,
"learning_rate": 7.97979797979798e-07,
"loss": 0.0699,
"step": 2100
},
{
"epoch": 0.11,
"grad_norm": 1.3634223937988281,
"learning_rate": 7.878787878787878e-07,
"loss": 0.0682,
"step": 2200
},
{
"epoch": 0.12,
"grad_norm": 3.0763113498687744,
"learning_rate": 7.777777777777778e-07,
"loss": 0.0786,
"step": 2300
},
{
"epoch": 0.12,
"grad_norm": 1.5437021255493164,
"learning_rate": 7.676767676767675e-07,
"loss": 0.065,
"step": 2400
},
{
"epoch": 0.12,
"grad_norm": 1.5890172719955444,
"learning_rate": 7.575757575757575e-07,
"loss": 0.0772,
"step": 2500
},
{
"epoch": 0.13,
"grad_norm": 0.9904383420944214,
"learning_rate": 7.474747474747475e-07,
"loss": 0.0735,
"step": 2600
},
{
"epoch": 0.14,
"grad_norm": 0.9233341813087463,
"learning_rate": 7.373737373737373e-07,
"loss": 0.0695,
"step": 2700
},
{
"epoch": 0.14,
"grad_norm": 0.10603007674217224,
"learning_rate": 7.272727272727272e-07,
"loss": 0.0595,
"step": 2800
},
{
"epoch": 0.14,
"grad_norm": 0.9929779767990112,
"learning_rate": 7.171717171717171e-07,
"loss": 0.0595,
"step": 2900
},
{
"epoch": 0.15,
"grad_norm": 0.4711184799671173,
"learning_rate": 7.07070707070707e-07,
"loss": 0.0606,
"step": 3000
},
{
"epoch": 0.15,
"grad_norm": 0.5627967715263367,
"learning_rate": 6.96969696969697e-07,
"loss": 0.0713,
"step": 3100
},
{
"epoch": 0.16,
"grad_norm": 0.7645086646080017,
"learning_rate": 6.868686868686868e-07,
"loss": 0.0672,
"step": 3200
},
{
"epoch": 0.17,
"grad_norm": 0.8071433901786804,
"learning_rate": 6.767676767676767e-07,
"loss": 0.0809,
"step": 3300
},
{
"epoch": 0.17,
"grad_norm": 1.281545877456665,
"learning_rate": 6.666666666666666e-07,
"loss": 0.0577,
"step": 3400
},
{
"epoch": 0.17,
"grad_norm": 1.15431547164917,
"learning_rate": 6.565656565656566e-07,
"loss": 0.0613,
"step": 3500
},
{
"epoch": 0.18,
"grad_norm": 0.42522451281547546,
"learning_rate": 6.464646464646465e-07,
"loss": 0.0531,
"step": 3600
},
{
"epoch": 0.18,
"grad_norm": 1.2078278064727783,
"learning_rate": 6.363636363636363e-07,
"loss": 0.0692,
"step": 3700
},
{
"epoch": 0.19,
"grad_norm": 2.207512855529785,
"learning_rate": 6.262626262626263e-07,
"loss": 0.0508,
"step": 3800
},
{
"epoch": 0.2,
"grad_norm": 1.696768045425415,
"learning_rate": 6.161616161616161e-07,
"loss": 0.0655,
"step": 3900
},
{
"epoch": 0.2,
"grad_norm": 0.6295761466026306,
"learning_rate": 6.060606060606061e-07,
"loss": 0.0577,
"step": 4000
},
{
"epoch": 0.2,
"eval_loss": 0.057661667466163635,
"eval_runtime": 304.4519,
"eval_samples_per_second": 3.285,
"eval_steps_per_second": 0.821,
"step": 4000
},
{
"epoch": 0.2,
"grad_norm": 1.7527456283569336,
"learning_rate": 5.959595959595959e-07,
"loss": 0.0575,
"step": 4100
},
{
"epoch": 0.21,
"grad_norm": 2.4716615676879883,
"learning_rate": 5.858585858585858e-07,
"loss": 0.0768,
"step": 4200
},
{
"epoch": 0.21,
"grad_norm": 0.5344067215919495,
"learning_rate": 5.757575757575758e-07,
"loss": 0.0571,
"step": 4300
},
{
"epoch": 0.22,
"grad_norm": 0.9244055151939392,
"learning_rate": 5.656565656565657e-07,
"loss": 0.0563,
"step": 4400
},
{
"epoch": 0.23,
"grad_norm": 1.16805100440979,
"learning_rate": 5.555555555555555e-07,
"loss": 0.0629,
"step": 4500
},
{
"epoch": 0.23,
"grad_norm": 1.6229370832443237,
"learning_rate": 5.454545454545454e-07,
"loss": 0.0641,
"step": 4600
},
{
"epoch": 0.23,
"grad_norm": 0.9978136420249939,
"learning_rate": 5.353535353535354e-07,
"loss": 0.0642,
"step": 4700
},
{
"epoch": 0.24,
"grad_norm": 0.4647519886493683,
"learning_rate": 5.252525252525253e-07,
"loss": 0.0661,
"step": 4800
},
{
"epoch": 0.24,
"grad_norm": 1.3375582695007324,
"learning_rate": 5.152525252525253e-07,
"loss": 0.0595,
"step": 4900
},
{
"epoch": 0.25,
"grad_norm": 0.08847852051258087,
"learning_rate": 5.051515151515151e-07,
"loss": 0.0461,
"step": 5000
},
{
"epoch": 0.26,
"grad_norm": 1.0537497997283936,
"learning_rate": 4.95050505050505e-07,
"loss": 0.0494,
"step": 5100
},
{
"epoch": 0.26,
"grad_norm": 0.9269624948501587,
"learning_rate": 4.849494949494949e-07,
"loss": 0.0714,
"step": 5200
},
{
"epoch": 0.27,
"grad_norm": 0.7937563061714172,
"learning_rate": 4.748484848484848e-07,
"loss": 0.065,
"step": 5300
},
{
"epoch": 0.27,
"grad_norm": 1.4173548221588135,
"learning_rate": 4.6474747474747473e-07,
"loss": 0.0496,
"step": 5400
},
{
"epoch": 0.28,
"grad_norm": 0.37847477197647095,
"learning_rate": 4.546464646464646e-07,
"loss": 0.0651,
"step": 5500
},
{
"epoch": 0.28,
"grad_norm": 0.7063392400741577,
"learning_rate": 4.445454545454545e-07,
"loss": 0.0562,
"step": 5600
},
{
"epoch": 0.28,
"grad_norm": 0.6490029096603394,
"learning_rate": 4.344444444444444e-07,
"loss": 0.0695,
"step": 5700
},
{
"epoch": 0.29,
"grad_norm": 0.8921324610710144,
"learning_rate": 4.2434343434343434e-07,
"loss": 0.0733,
"step": 5800
},
{
"epoch": 0.29,
"grad_norm": 0.5432973504066467,
"learning_rate": 4.142424242424242e-07,
"loss": 0.0589,
"step": 5900
},
{
"epoch": 0.3,
"grad_norm": 0.017520183697342873,
"learning_rate": 4.041414141414141e-07,
"loss": 0.0541,
"step": 6000
},
{
"epoch": 0.3,
"eval_loss": 0.06119654327630997,
"eval_runtime": 305.9355,
"eval_samples_per_second": 3.269,
"eval_steps_per_second": 0.817,
"step": 6000
},
{
"epoch": 0.3,
"grad_norm": 2.68011212348938,
"learning_rate": 3.94040404040404e-07,
"loss": 0.0568,
"step": 6100
},
{
"epoch": 0.31,
"grad_norm": 2.3149752616882324,
"learning_rate": 3.839393939393939e-07,
"loss": 0.056,
"step": 6200
},
{
"epoch": 0.32,
"grad_norm": 0.04929787665605545,
"learning_rate": 3.738383838383838e-07,
"loss": 0.053,
"step": 6300
},
{
"epoch": 0.32,
"grad_norm": 0.7595096826553345,
"learning_rate": 3.6373737373737373e-07,
"loss": 0.0651,
"step": 6400
},
{
"epoch": 0.33,
"grad_norm": 0.08241419494152069,
"learning_rate": 3.536363636363636e-07,
"loss": 0.0729,
"step": 6500
},
{
"epoch": 0.33,
"grad_norm": 0.7712632417678833,
"learning_rate": 3.435353535353535e-07,
"loss": 0.0533,
"step": 6600
},
{
"epoch": 0.34,
"grad_norm": 0.8940716981887817,
"learning_rate": 3.3343434343434343e-07,
"loss": 0.0583,
"step": 6700
},
{
"epoch": 0.34,
"grad_norm": 0.07956444472074509,
"learning_rate": 3.233333333333333e-07,
"loss": 0.058,
"step": 6800
},
{
"epoch": 0.34,
"grad_norm": 0.6199010610580444,
"learning_rate": 3.1323232323232326e-07,
"loss": 0.0568,
"step": 6900
},
{
"epoch": 0.35,
"grad_norm": 0.5326449275016785,
"learning_rate": 3.031313131313131e-07,
"loss": 0.0525,
"step": 7000
},
{
"epoch": 0.35,
"grad_norm": 0.6375436186790466,
"learning_rate": 2.9303030303030304e-07,
"loss": 0.0523,
"step": 7100
},
{
"epoch": 0.36,
"grad_norm": 1.2404210567474365,
"learning_rate": 2.829292929292929e-07,
"loss": 0.0589,
"step": 7200
},
{
"epoch": 0.36,
"grad_norm": 1.2247310876846313,
"learning_rate": 2.728282828282828e-07,
"loss": 0.0586,
"step": 7300
},
{
"epoch": 0.37,
"grad_norm": 0.5559460520744324,
"learning_rate": 2.6272727272727273e-07,
"loss": 0.0578,
"step": 7400
},
{
"epoch": 0.38,
"grad_norm": 0.2506352961063385,
"learning_rate": 2.5262626262626265e-07,
"loss": 0.0555,
"step": 7500
},
{
"epoch": 0.38,
"grad_norm": 0.7911235094070435,
"learning_rate": 2.425252525252525e-07,
"loss": 0.0641,
"step": 7600
},
{
"epoch": 0.39,
"grad_norm": 0.21588227152824402,
"learning_rate": 2.324242424242424e-07,
"loss": 0.068,
"step": 7700
},
{
"epoch": 0.39,
"grad_norm": 0.5807000994682312,
"learning_rate": 2.223232323232323e-07,
"loss": 0.061,
"step": 7800
},
{
"epoch": 0.4,
"grad_norm": 2.298220157623291,
"learning_rate": 2.122222222222222e-07,
"loss": 0.0594,
"step": 7900
},
{
"epoch": 0.4,
"grad_norm": 1.0665823221206665,
"learning_rate": 2.021212121212121e-07,
"loss": 0.052,
"step": 8000
},
{
"epoch": 0.4,
"eval_loss": 0.050024934113025665,
"eval_runtime": 304.3337,
"eval_samples_per_second": 3.286,
"eval_steps_per_second": 0.821,
"step": 8000
},
{
"epoch": 0.41,
"grad_norm": 0.76180499792099,
"learning_rate": 1.92020202020202e-07,
"loss": 0.0611,
"step": 8100
},
{
"epoch": 0.41,
"grad_norm": 0.549780547618866,
"learning_rate": 1.8191919191919193e-07,
"loss": 0.0612,
"step": 8200
},
{
"epoch": 0.41,
"grad_norm": 2.0332558155059814,
"learning_rate": 1.7181818181818182e-07,
"loss": 0.0545,
"step": 8300
},
{
"epoch": 0.42,
"grad_norm": 0.8247748017311096,
"learning_rate": 1.617171717171717e-07,
"loss": 0.0616,
"step": 8400
},
{
"epoch": 0.42,
"grad_norm": 1.0830433368682861,
"learning_rate": 1.5161616161616162e-07,
"loss": 0.0553,
"step": 8500
},
{
"epoch": 0.43,
"grad_norm": 0.06752818822860718,
"learning_rate": 1.4151515151515151e-07,
"loss": 0.044,
"step": 8600
},
{
"epoch": 0.43,
"grad_norm": 2.645291805267334,
"learning_rate": 1.3141414141414143e-07,
"loss": 0.049,
"step": 8700
},
{
"epoch": 0.44,
"grad_norm": 0.5109922289848328,
"learning_rate": 1.2141414141414142e-07,
"loss": 0.0597,
"step": 8800
},
{
"epoch": 0.45,
"grad_norm": 1.7064566612243652,
"learning_rate": 1.1131313131313131e-07,
"loss": 0.0731,
"step": 8900
},
{
"epoch": 0.45,
"grad_norm": 0.9522804617881775,
"learning_rate": 1.0121212121212121e-07,
"loss": 0.0641,
"step": 9000
},
{
"epoch": 0.46,
"grad_norm": 0.425570011138916,
"learning_rate": 9.111111111111112e-08,
"loss": 0.0531,
"step": 9100
},
{
"epoch": 0.46,
"grad_norm": 1.16694176197052,
"learning_rate": 8.1010101010101e-08,
"loss": 0.0517,
"step": 9200
},
{
"epoch": 0.47,
"grad_norm": 0.6792075634002686,
"learning_rate": 7.090909090909091e-08,
"loss": 0.0644,
"step": 9300
},
{
"epoch": 0.47,
"grad_norm": 1.2597424983978271,
"learning_rate": 6.080808080808081e-08,
"loss": 0.0528,
"step": 9400
},
{
"epoch": 0.47,
"grad_norm": 4.67294454574585,
"learning_rate": 5.0707070707070707e-08,
"loss": 0.0559,
"step": 9500
},
{
"epoch": 0.48,
"grad_norm": 1.676389455795288,
"learning_rate": 4.06060606060606e-08,
"loss": 0.0589,
"step": 9600
},
{
"epoch": 0.48,
"grad_norm": 2.2427666187286377,
"learning_rate": 3.0505050505050505e-08,
"loss": 0.0645,
"step": 9700
},
{
"epoch": 0.49,
"grad_norm": 0.02416997402906418,
"learning_rate": 2.0404040404040402e-08,
"loss": 0.0453,
"step": 9800
},
{
"epoch": 0.49,
"grad_norm": 2.5054471492767334,
"learning_rate": 1.0303030303030303e-08,
"loss": 0.0491,
"step": 9900
},
{
"epoch": 0.5,
"grad_norm": 0.6195088028907776,
"learning_rate": 2.0202020202020202e-10,
"loss": 0.054,
"step": 10000
},
{
"epoch": 0.5,
"eval_loss": 0.059650979936122894,
"eval_runtime": 304.5847,
"eval_samples_per_second": 3.283,
"eval_steps_per_second": 0.821,
"step": 10000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 8.1602751234048e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}