{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4,
"eval_steps": 2000,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 12.253900527954102,
"learning_rate": 9.8e-07,
"loss": 0.3335,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 9.193947792053223,
"learning_rate": 9.9010101010101e-07,
"loss": 0.2105,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 5.168125152587891,
"learning_rate": 9.8e-07,
"loss": 0.1618,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 2.163803815841675,
"learning_rate": 9.698989898989898e-07,
"loss": 0.1418,
"step": 400
},
{
"epoch": 0.03,
"grad_norm": 10.751598358154297,
"learning_rate": 9.597979797979797e-07,
"loss": 0.1215,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 7.5342793464660645,
"learning_rate": 9.496969696969696e-07,
"loss": 0.1142,
"step": 600
},
{
"epoch": 0.04,
"grad_norm": 8.430835723876953,
"learning_rate": 9.395959595959596e-07,
"loss": 0.121,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 1.8440964221954346,
"learning_rate": 9.295959595959596e-07,
"loss": 0.0969,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 2.699073553085327,
"learning_rate": 9.194949494949495e-07,
"loss": 0.1024,
"step": 900
},
{
"epoch": 0.05,
"grad_norm": 11.251789093017578,
"learning_rate": 9.093939393939394e-07,
"loss": 0.0899,
"step": 1000
},
{
"epoch": 0.06,
"grad_norm": 7.728929042816162,
"learning_rate": 8.992929292929292e-07,
"loss": 0.0837,
"step": 1100
},
{
"epoch": 0.06,
"grad_norm": 1.7401416301727295,
"learning_rate": 8.891919191919191e-07,
"loss": 0.0914,
"step": 1200
},
{
"epoch": 0.07,
"grad_norm": 3.5504443645477295,
"learning_rate": 8.790909090909091e-07,
"loss": 0.0754,
"step": 1300
},
{
"epoch": 0.07,
"grad_norm": 5.9004316329956055,
"learning_rate": 8.68989898989899e-07,
"loss": 0.0791,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 2.9171862602233887,
"learning_rate": 8.588888888888888e-07,
"loss": 0.0866,
"step": 1500
},
{
"epoch": 0.08,
"grad_norm": 5.907050132751465,
"learning_rate": 8.487878787878787e-07,
"loss": 0.0768,
"step": 1600
},
{
"epoch": 0.09,
"grad_norm": 4.856807708740234,
"learning_rate": 8.386868686868687e-07,
"loss": 0.0745,
"step": 1700
},
{
"epoch": 0.09,
"grad_norm": 2.4457929134368896,
"learning_rate": 8.285858585858585e-07,
"loss": 0.0808,
"step": 1800
},
{
"epoch": 0.1,
"grad_norm": 3.084287643432617,
"learning_rate": 8.184848484848484e-07,
"loss": 0.0743,
"step": 1900
},
{
"epoch": 0.1,
"grad_norm": 2.936805486679077,
"learning_rate": 8.083838383838384e-07,
"loss": 0.0675,
"step": 2000
},
{
"epoch": 0.1,
"eval_loss": 0.07925247400999069,
"eval_runtime": 204.8299,
"eval_samples_per_second": 4.882,
"eval_steps_per_second": 1.221,
"step": 2000
},
{
"epoch": 0.1,
"grad_norm": 13.029026985168457,
"learning_rate": 7.982828282828282e-07,
"loss": 0.0681,
"step": 2100
},
{
"epoch": 0.11,
"grad_norm": 10.308150291442871,
"learning_rate": 7.881818181818182e-07,
"loss": 0.0798,
"step": 2200
},
{
"epoch": 0.12,
"grad_norm": 5.64780330657959,
"learning_rate": 7.78080808080808e-07,
"loss": 0.0648,
"step": 2300
},
{
"epoch": 0.12,
"grad_norm": 6.982935905456543,
"learning_rate": 7.679797979797979e-07,
"loss": 0.0731,
"step": 2400
},
{
"epoch": 0.12,
"grad_norm": 3.5423080921173096,
"learning_rate": 7.578787878787879e-07,
"loss": 0.0817,
"step": 2500
},
{
"epoch": 0.13,
"grad_norm": 3.585963249206543,
"learning_rate": 7.477777777777778e-07,
"loss": 0.072,
"step": 2600
},
{
"epoch": 0.14,
"grad_norm": 7.086956977844238,
"learning_rate": 7.376767676767676e-07,
"loss": 0.0577,
"step": 2700
},
{
"epoch": 0.14,
"grad_norm": 4.985733509063721,
"learning_rate": 7.275757575757575e-07,
"loss": 0.0672,
"step": 2800
},
{
"epoch": 0.14,
"grad_norm": 1.4518052339553833,
"learning_rate": 7.174747474747475e-07,
"loss": 0.0616,
"step": 2900
},
{
"epoch": 0.15,
"grad_norm": 5.120323657989502,
"learning_rate": 7.073737373737373e-07,
"loss": 0.0625,
"step": 3000
},
{
"epoch": 0.15,
"grad_norm": 1.0982943773269653,
"learning_rate": 6.972727272727273e-07,
"loss": 0.0666,
"step": 3100
},
{
"epoch": 0.16,
"grad_norm": 1.8519880771636963,
"learning_rate": 6.871717171717171e-07,
"loss": 0.0786,
"step": 3200
},
{
"epoch": 0.17,
"grad_norm": 3.394327163696289,
"learning_rate": 6.77070707070707e-07,
"loss": 0.0786,
"step": 3300
},
{
"epoch": 0.17,
"grad_norm": 9.026866912841797,
"learning_rate": 6.66969696969697e-07,
"loss": 0.0677,
"step": 3400
},
{
"epoch": 0.17,
"grad_norm": 3.718776226043701,
"learning_rate": 6.568686868686868e-07,
"loss": 0.0576,
"step": 3500
},
{
"epoch": 0.18,
"grad_norm": 0.7873659729957581,
"learning_rate": 6.467676767676767e-07,
"loss": 0.0559,
"step": 3600
},
{
"epoch": 0.18,
"grad_norm": 5.039899826049805,
"learning_rate": 6.366666666666667e-07,
"loss": 0.0668,
"step": 3700
},
{
"epoch": 0.19,
"grad_norm": 2.4250056743621826,
"learning_rate": 6.266666666666667e-07,
"loss": 0.0524,
"step": 3800
},
{
"epoch": 0.2,
"grad_norm": 7.657347202301025,
"learning_rate": 6.165656565656565e-07,
"loss": 0.0665,
"step": 3900
},
{
"epoch": 0.2,
"grad_norm": 3.9768455028533936,
"learning_rate": 6.064646464646465e-07,
"loss": 0.0649,
"step": 4000
},
{
"epoch": 0.2,
"eval_loss": 0.06388480961322784,
"eval_runtime": 200.3994,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.248,
"step": 4000
},
{
"epoch": 0.2,
"grad_norm": 6.998288154602051,
"learning_rate": 5.963636363636363e-07,
"loss": 0.0488,
"step": 4100
},
{
"epoch": 0.21,
"grad_norm": 2.007672071456909,
"learning_rate": 5.862626262626262e-07,
"loss": 0.0683,
"step": 4200
},
{
"epoch": 0.21,
"grad_norm": 0.5769347548484802,
"learning_rate": 5.761616161616162e-07,
"loss": 0.0544,
"step": 4300
},
{
"epoch": 0.22,
"grad_norm": 0.005931401159614325,
"learning_rate": 5.660606060606061e-07,
"loss": 0.0603,
"step": 4400
},
{
"epoch": 0.23,
"grad_norm": 8.401103973388672,
"learning_rate": 5.559595959595959e-07,
"loss": 0.0531,
"step": 4500
},
{
"epoch": 0.23,
"grad_norm": 3.780881404876709,
"learning_rate": 5.459595959595959e-07,
"loss": 0.0729,
"step": 4600
},
{
"epoch": 0.23,
"grad_norm": 8.754217147827148,
"learning_rate": 5.358585858585858e-07,
"loss": 0.0766,
"step": 4700
},
{
"epoch": 0.24,
"grad_norm": 2.9296135902404785,
"learning_rate": 5.257575757575757e-07,
"loss": 0.0651,
"step": 4800
},
{
"epoch": 0.24,
"grad_norm": 4.884037494659424,
"learning_rate": 5.156565656565657e-07,
"loss": 0.0623,
"step": 4900
},
{
"epoch": 0.25,
"grad_norm": 1.247363567352295,
"learning_rate": 5.055555555555555e-07,
"loss": 0.0498,
"step": 5000
},
{
"epoch": 0.26,
"grad_norm": 5.299038887023926,
"learning_rate": 4.954545454545454e-07,
"loss": 0.0558,
"step": 5100
},
{
"epoch": 0.26,
"grad_norm": 3.8896191120147705,
"learning_rate": 4.853535353535353e-07,
"loss": 0.0733,
"step": 5200
},
{
"epoch": 0.27,
"grad_norm": 2.1511714458465576,
"learning_rate": 4.752525252525252e-07,
"loss": 0.0647,
"step": 5300
},
{
"epoch": 0.27,
"grad_norm": 3.958899736404419,
"learning_rate": 4.6515151515151513e-07,
"loss": 0.0594,
"step": 5400
},
{
"epoch": 0.28,
"grad_norm": 7.097384929656982,
"learning_rate": 4.55050505050505e-07,
"loss": 0.0623,
"step": 5500
},
{
"epoch": 0.28,
"grad_norm": 6.70374870300293,
"learning_rate": 4.449494949494949e-07,
"loss": 0.0669,
"step": 5600
},
{
"epoch": 0.28,
"grad_norm": 1.3009291887283325,
"learning_rate": 4.3484848484848483e-07,
"loss": 0.0528,
"step": 5700
},
{
"epoch": 0.29,
"grad_norm": 4.70461368560791,
"learning_rate": 4.2474747474747474e-07,
"loss": 0.0668,
"step": 5800
},
{
"epoch": 0.29,
"grad_norm": 0.03208538889884949,
"learning_rate": 4.1464646464646466e-07,
"loss": 0.0641,
"step": 5900
},
{
"epoch": 0.3,
"grad_norm": 0.0016973208403214812,
"learning_rate": 4.045454545454545e-07,
"loss": 0.064,
"step": 6000
},
{
"epoch": 0.3,
"eval_loss": 0.06021637097001076,
"eval_runtime": 200.1582,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.249,
"step": 6000
},
{
"epoch": 0.3,
"grad_norm": 9.762524604797363,
"learning_rate": 3.9444444444444444e-07,
"loss": 0.0639,
"step": 6100
},
{
"epoch": 0.31,
"grad_norm": 6.966504096984863,
"learning_rate": 3.843434343434343e-07,
"loss": 0.0541,
"step": 6200
},
{
"epoch": 0.32,
"grad_norm": 4.614800930023193,
"learning_rate": 3.7424242424242427e-07,
"loss": 0.049,
"step": 6300
},
{
"epoch": 0.32,
"grad_norm": 1.268702745437622,
"learning_rate": 3.6414141414141413e-07,
"loss": 0.0571,
"step": 6400
},
{
"epoch": 0.33,
"grad_norm": 0.18738865852355957,
"learning_rate": 3.5404040404040405e-07,
"loss": 0.0701,
"step": 6500
},
{
"epoch": 0.33,
"grad_norm": 3.5662453174591064,
"learning_rate": 3.439393939393939e-07,
"loss": 0.0513,
"step": 6600
},
{
"epoch": 0.34,
"grad_norm": 15.757116317749023,
"learning_rate": 3.3383838383838383e-07,
"loss": 0.0542,
"step": 6700
},
{
"epoch": 0.34,
"grad_norm": 1.947461485862732,
"learning_rate": 3.237373737373737e-07,
"loss": 0.0616,
"step": 6800
},
{
"epoch": 0.34,
"grad_norm": 1.4099074602127075,
"learning_rate": 3.1363636363636366e-07,
"loss": 0.067,
"step": 6900
},
{
"epoch": 0.35,
"grad_norm": 4.870973587036133,
"learning_rate": 3.035353535353535e-07,
"loss": 0.0547,
"step": 7000
},
{
"epoch": 0.35,
"grad_norm": 5.2733049392700195,
"learning_rate": 2.9343434343434344e-07,
"loss": 0.0489,
"step": 7100
},
{
"epoch": 0.36,
"grad_norm": 1.496048927307129,
"learning_rate": 2.833333333333333e-07,
"loss": 0.0528,
"step": 7200
},
{
"epoch": 0.36,
"grad_norm": 5.224077224731445,
"learning_rate": 2.732323232323232e-07,
"loss": 0.0587,
"step": 7300
},
{
"epoch": 0.37,
"grad_norm": 7.1024699211120605,
"learning_rate": 2.631313131313131e-07,
"loss": 0.0564,
"step": 7400
},
{
"epoch": 0.38,
"grad_norm": 2.4630818367004395,
"learning_rate": 2.5303030303030305e-07,
"loss": 0.0567,
"step": 7500
},
{
"epoch": 0.38,
"grad_norm": 5.36046838760376,
"learning_rate": 2.429292929292929e-07,
"loss": 0.0453,
"step": 7600
},
{
"epoch": 0.39,
"grad_norm": 3.9095382690429688,
"learning_rate": 2.3282828282828283e-07,
"loss": 0.0626,
"step": 7700
},
{
"epoch": 0.39,
"grad_norm": 0.7284829020500183,
"learning_rate": 2.2272727272727272e-07,
"loss": 0.0621,
"step": 7800
},
{
"epoch": 0.4,
"grad_norm": 0.15286648273468018,
"learning_rate": 2.1262626262626264e-07,
"loss": 0.0535,
"step": 7900
},
{
"epoch": 0.4,
"grad_norm": 5.735705852508545,
"learning_rate": 2.0252525252525253e-07,
"loss": 0.053,
"step": 8000
},
{
"epoch": 0.4,
"eval_loss": 0.05654788389801979,
"eval_runtime": 200.4178,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.247,
"step": 8000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"total_flos": 7.54256789372928e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}