{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.868411617067049,
  "eval_steps": 2000,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 2.6817848682403564,
      "learning_rate": 1e-06,
      "loss": 1.077,
      "step": 100
    },
    {
      "epoch": 0.07,
      "grad_norm": 3.5721559524536133,
      "learning_rate": 9.898989898989898e-07,
      "loss": 0.8347,
      "step": 200
    },
    {
      "epoch": 0.11,
      "grad_norm": 4.235135078430176,
      "learning_rate": 9.7989898989899e-07,
      "loss": 0.6862,
      "step": 300
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.8351337909698486,
      "learning_rate": 9.697979797979798e-07,
      "loss": 0.666,
      "step": 400
    },
    {
      "epoch": 0.18,
      "grad_norm": 4.0803093910217285,
      "learning_rate": 9.598989898989899e-07,
      "loss": 0.6536,
      "step": 500
    },
    {
      "epoch": 0.22,
      "grad_norm": 3.8616509437561035,
      "learning_rate": 9.497979797979798e-07,
      "loss": 0.6579,
      "step": 600
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.376267194747925,
      "learning_rate": 9.396969696969696e-07,
      "loss": 0.6223,
      "step": 700
    },
    {
      "epoch": 0.29,
      "grad_norm": 3.7751758098602295,
      "learning_rate": 9.295959595959596e-07,
      "loss": 0.6141,
      "step": 800
    },
    {
      "epoch": 0.32,
      "grad_norm": 3.669135093688965,
      "learning_rate": 9.194949494949495e-07,
      "loss": 0.627,
      "step": 900
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.837573528289795,
      "learning_rate": 9.093939393939394e-07,
      "loss": 0.6309,
      "step": 1000
    },
    {
      "epoch": 0.39,
      "grad_norm": 3.461555242538452,
      "learning_rate": 8.992929292929292e-07,
      "loss": 0.6138,
      "step": 1100
    },
    {
      "epoch": 0.43,
      "grad_norm": 2.907550573348999,
      "learning_rate": 8.891919191919191e-07,
      "loss": 0.6367,
      "step": 1200
    },
    {
      "epoch": 0.47,
      "grad_norm": 5.007347106933594,
      "learning_rate": 8.790909090909091e-07,
      "loss": 0.6444,
      "step": 1300
    },
    {
      "epoch": 0.5,
      "grad_norm": 4.709310054779053,
      "learning_rate": 8.68989898989899e-07,
      "loss": 0.6418,
      "step": 1400
    },
    {
      "epoch": 0.54,
      "grad_norm": 2.976135730743408,
      "learning_rate": 8.588888888888888e-07,
      "loss": 0.6572,
      "step": 1500
    },
    {
      "epoch": 0.57,
      "grad_norm": 2.4408750534057617,
      "learning_rate": 8.487878787878787e-07,
      "loss": 0.6347,
      "step": 1600
    },
    {
      "epoch": 0.61,
      "grad_norm": 4.144632816314697,
      "learning_rate": 8.386868686868687e-07,
      "loss": 0.6001,
      "step": 1700
    },
    {
      "epoch": 0.65,
      "grad_norm": 3.0854594707489014,
      "learning_rate": 8.285858585858585e-07,
      "loss": 0.6446,
      "step": 1800
    },
    {
      "epoch": 0.68,
      "grad_norm": 4.574347496032715,
      "learning_rate": 8.185858585858586e-07,
      "loss": 0.6206,
      "step": 1900
    },
    {
      "epoch": 0.72,
      "grad_norm": 4.453900337219238,
      "learning_rate": 8.084848484848484e-07,
      "loss": 0.6555,
      "step": 2000
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.7467872500419617,
      "eval_runtime": 199.2545,
      "eval_samples_per_second": 5.019,
      "eval_steps_per_second": 1.255,
      "step": 2000
    },
    {
      "epoch": 0.75,
      "grad_norm": 4.515957832336426,
      "learning_rate": 7.983838383838384e-07,
      "loss": 0.6001,
      "step": 2100
    },
    {
      "epoch": 0.79,
      "grad_norm": 5.094139099121094,
      "learning_rate": 7.882828282828282e-07,
      "loss": 0.5989,
      "step": 2200
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.6258816719055176,
      "learning_rate": 7.781818181818182e-07,
      "loss": 0.5893,
      "step": 2300
    },
    {
      "epoch": 0.86,
      "grad_norm": 3.713240385055542,
      "learning_rate": 7.68080808080808e-07,
      "loss": 0.5944,
      "step": 2400
    },
    {
      "epoch": 0.9,
      "grad_norm": 2.879484176635742,
      "learning_rate": 7.579797979797979e-07,
      "loss": 0.6098,
      "step": 2500
    },
    {
      "epoch": 0.93,
      "grad_norm": 6.193981170654297,
      "learning_rate": 7.478787878787879e-07,
      "loss": 0.6232,
      "step": 2600
    },
    {
      "epoch": 0.97,
      "grad_norm": 5.713097095489502,
      "learning_rate": 7.377777777777777e-07,
      "loss": 0.5912,
      "step": 2700
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.410184383392334,
      "learning_rate": 7.276767676767677e-07,
      "loss": 0.6023,
      "step": 2800
    },
    {
      "epoch": 1.04,
      "grad_norm": 5.159379005432129,
      "learning_rate": 7.175757575757575e-07,
      "loss": 0.6148,
      "step": 2900
    },
    {
      "epoch": 1.08,
      "grad_norm": 4.885188102722168,
      "learning_rate": 7.074747474747474e-07,
      "loss": 0.5715,
      "step": 3000
    },
    {
      "epoch": 1.11,
      "grad_norm": 2.8274130821228027,
      "learning_rate": 6.973737373737374e-07,
      "loss": 0.5843,
      "step": 3100
    },
    {
      "epoch": 1.15,
      "grad_norm": 4.313437461853027,
      "learning_rate": 6.872727272727273e-07,
      "loss": 0.5781,
      "step": 3200
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.335306167602539,
      "learning_rate": 6.771717171717171e-07,
      "loss": 0.5875,
      "step": 3300
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.799851417541504,
      "learning_rate": 6.67070707070707e-07,
      "loss": 0.5903,
      "step": 3400
    },
    {
      "epoch": 1.25,
      "grad_norm": 5.783286094665527,
      "learning_rate": 6.56969696969697e-07,
      "loss": 0.591,
      "step": 3500
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.4970409870147705,
      "learning_rate": 6.468686868686868e-07,
      "loss": 0.576,
      "step": 3600
    },
    {
      "epoch": 1.33,
      "grad_norm": 3.698167562484741,
      "learning_rate": 6.367676767676767e-07,
      "loss": 0.5971,
      "step": 3700
    },
    {
      "epoch": 1.36,
      "grad_norm": 5.005897521972656,
      "learning_rate": 6.266666666666667e-07,
      "loss": 0.5847,
      "step": 3800
    },
    {
      "epoch": 1.4,
      "grad_norm": 4.98276424407959,
      "learning_rate": 6.165656565656565e-07,
      "loss": 0.6004,
      "step": 3900
    },
    {
      "epoch": 1.43,
      "grad_norm": 4.1894402503967285,
      "learning_rate": 6.064646464646465e-07,
      "loss": 0.594,
      "step": 4000
    },
    {
      "epoch": 1.43,
      "eval_loss": 0.7255015969276428,
      "eval_runtime": 204.1006,
      "eval_samples_per_second": 4.9,
      "eval_steps_per_second": 1.225,
      "step": 4000
    },
    {
      "epoch": 1.47,
      "grad_norm": 4.014637470245361,
      "learning_rate": 5.963636363636363e-07,
      "loss": 0.6014,
      "step": 4100
    },
    {
      "epoch": 1.51,
      "grad_norm": 4.055298805236816,
      "learning_rate": 5.862626262626262e-07,
      "loss": 0.575,
      "step": 4200
    },
    {
      "epoch": 1.54,
      "grad_norm": 3.810657262802124,
      "learning_rate": 5.761616161616162e-07,
      "loss": 0.5879,
      "step": 4300
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.8757894039154053,
      "learning_rate": 5.660606060606061e-07,
      "loss": 0.5864,
      "step": 4400
    },
    {
      "epoch": 1.61,
      "grad_norm": 5.22218132019043,
      "learning_rate": 5.559595959595959e-07,
      "loss": 0.5881,
      "step": 4500
    },
    {
      "epoch": 1.65,
      "grad_norm": 4.59492826461792,
      "learning_rate": 5.458585858585858e-07,
      "loss": 0.5628,
      "step": 4600
    },
    {
      "epoch": 1.69,
      "grad_norm": 4.455421447753906,
      "learning_rate": 5.357575757575758e-07,
      "loss": 0.6008,
      "step": 4700
    },
    {
      "epoch": 1.72,
      "grad_norm": 4.579429626464844,
      "learning_rate": 5.256565656565657e-07,
      "loss": 0.5957,
      "step": 4800
    },
    {
      "epoch": 1.76,
      "grad_norm": 5.034527778625488,
      "learning_rate": 5.155555555555555e-07,
      "loss": 0.556,
      "step": 4900
    },
    {
      "epoch": 1.79,
      "grad_norm": 6.71533203125,
      "learning_rate": 5.054545454545454e-07,
      "loss": 0.5811,
      "step": 5000
    },
    {
      "epoch": 1.83,
      "grad_norm": 3.7940480709075928,
      "learning_rate": 4.953535353535353e-07,
      "loss": 0.5907,
      "step": 5100
    },
    {
      "epoch": 1.86,
      "grad_norm": 4.488739013671875,
      "learning_rate": 4.852525252525253e-07,
      "loss": 0.5975,
      "step": 5200
    },
    {
      "epoch": 1.9,
      "grad_norm": 4.032153129577637,
      "learning_rate": 4.7515151515151514e-07,
      "loss": 0.5771,
      "step": 5300
    },
    {
      "epoch": 1.94,
      "grad_norm": 4.4360504150390625,
      "learning_rate": 4.65050505050505e-07,
      "loss": 0.5625,
      "step": 5400
    },
    {
      "epoch": 1.97,
      "grad_norm": 4.770937919616699,
      "learning_rate": 4.549494949494949e-07,
      "loss": 0.5647,
      "step": 5500
    },
    {
      "epoch": 2.01,
      "grad_norm": 4.940979957580566,
      "learning_rate": 4.448484848484848e-07,
      "loss": 0.5871,
      "step": 5600
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.6415045261383057,
      "learning_rate": 4.3474747474747475e-07,
      "loss": 0.5615,
      "step": 5700
    },
    {
      "epoch": 2.08,
      "grad_norm": 5.734036922454834,
      "learning_rate": 4.246464646464646e-07,
      "loss": 0.5491,
      "step": 5800
    },
    {
      "epoch": 2.12,
      "grad_norm": 7.926692008972168,
      "learning_rate": 4.1454545454545453e-07,
      "loss": 0.5602,
      "step": 5900
    },
    {
      "epoch": 2.15,
      "grad_norm": 3.576627492904663,
      "learning_rate": 4.044444444444444e-07,
      "loss": 0.596,
      "step": 6000
    },
    {
      "epoch": 2.15,
      "eval_loss": 0.7305542826652527,
      "eval_runtime": 199.7984,
      "eval_samples_per_second": 5.005,
      "eval_steps_per_second": 1.251,
      "step": 6000
    },
    {
      "epoch": 2.19,
      "grad_norm": 5.2710347175598145,
      "learning_rate": 3.943434343434343e-07,
      "loss": 0.558,
      "step": 6100
    },
    {
      "epoch": 2.22,
      "grad_norm": 6.788084030151367,
      "learning_rate": 3.8424242424242423e-07,
      "loss": 0.5531,
      "step": 6200
    },
    {
      "epoch": 2.26,
      "grad_norm": 5.253055572509766,
      "learning_rate": 3.7414141414141414e-07,
      "loss": 0.5767,
      "step": 6300
    },
    {
      "epoch": 2.29,
      "grad_norm": 5.5620341300964355,
      "learning_rate": 3.6414141414141413e-07,
      "loss": 0.5362,
      "step": 6400
    },
    {
      "epoch": 2.33,
      "grad_norm": 5.042788505554199,
      "learning_rate": 3.5404040404040405e-07,
      "loss": 0.5863,
      "step": 6500
    },
    {
      "epoch": 2.37,
      "grad_norm": 4.5857415199279785,
      "learning_rate": 3.439393939393939e-07,
      "loss": 0.5759,
      "step": 6600
    },
    {
      "epoch": 2.4,
      "grad_norm": 5.151732444763184,
      "learning_rate": 3.3383838383838383e-07,
      "loss": 0.5481,
      "step": 6700
    },
    {
      "epoch": 2.44,
      "grad_norm": 4.4784393310546875,
      "learning_rate": 3.237373737373737e-07,
      "loss": 0.5686,
      "step": 6800
    },
    {
      "epoch": 2.47,
      "grad_norm": 5.631476879119873,
      "learning_rate": 3.1363636363636366e-07,
      "loss": 0.5803,
      "step": 6900
    },
    {
      "epoch": 2.51,
      "grad_norm": 6.012638092041016,
      "learning_rate": 3.035353535353535e-07,
      "loss": 0.5937,
      "step": 7000
    },
    {
      "epoch": 2.55,
      "grad_norm": 5.126097202301025,
      "learning_rate": 2.9343434343434344e-07,
      "loss": 0.57,
      "step": 7100
    },
    {
      "epoch": 2.58,
      "grad_norm": 6.094301700592041,
      "learning_rate": 2.833333333333333e-07,
      "loss": 0.5468,
      "step": 7200
    },
    {
      "epoch": 2.62,
      "grad_norm": 5.276275634765625,
      "learning_rate": 2.732323232323232e-07,
      "loss": 0.5411,
      "step": 7300
    },
    {
      "epoch": 2.65,
      "grad_norm": 5.292468547821045,
      "learning_rate": 2.631313131313131e-07,
      "loss": 0.5579,
      "step": 7400
    },
    {
      "epoch": 2.69,
      "grad_norm": 5.443357944488525,
      "learning_rate": 2.5303030303030305e-07,
      "loss": 0.5844,
      "step": 7500
    },
    {
      "epoch": 2.72,
      "grad_norm": 5.82540225982666,
      "learning_rate": 2.429292929292929e-07,
      "loss": 0.5693,
      "step": 7600
    },
    {
      "epoch": 2.76,
      "grad_norm": 4.729764938354492,
      "learning_rate": 2.3282828282828283e-07,
      "loss": 0.5705,
      "step": 7700
    },
    {
      "epoch": 2.8,
      "grad_norm": 5.492878437042236,
      "learning_rate": 2.2272727272727272e-07,
      "loss": 0.5694,
      "step": 7800
    },
    {
      "epoch": 2.83,
      "grad_norm": 5.059893608093262,
      "learning_rate": 2.1262626262626264e-07,
      "loss": 0.5543,
      "step": 7900
    },
    {
      "epoch": 2.87,
      "grad_norm": 5.176052093505859,
      "learning_rate": 2.0252525252525253e-07,
      "loss": 0.5411,
      "step": 8000
    },
    {
      "epoch": 2.87,
      "eval_loss": 0.725495457649231,
      "eval_runtime": 564.5599,
      "eval_samples_per_second": 1.771,
      "eval_steps_per_second": 0.443,
      "step": 8000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 2000,
  "total_flos": 7.541625072742564e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}