{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.968503937007874, "eval_steps": 500, "global_step": 500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03937007874015748, "grad_norm": 19.470706939697266, "learning_rate": 1.9790026246719162e-05, "loss": 3.7835, "step": 10 }, { "epoch": 0.07874015748031496, "grad_norm": 15.06888484954834, "learning_rate": 1.952755905511811e-05, "loss": 3.5622, "step": 20 }, { "epoch": 0.11811023622047244, "grad_norm": 10.949335098266602, "learning_rate": 1.9265091863517064e-05, "loss": 3.0315, "step": 30 }, { "epoch": 0.15748031496062992, "grad_norm": 13.46808910369873, "learning_rate": 1.9002624671916013e-05, "loss": 3.1242, "step": 40 }, { "epoch": 0.1968503937007874, "grad_norm": 14.465880393981934, "learning_rate": 1.8740157480314962e-05, "loss": 3.1138, "step": 50 }, { "epoch": 0.23622047244094488, "grad_norm": 16.78143310546875, "learning_rate": 1.847769028871391e-05, "loss": 2.8584, "step": 60 }, { "epoch": 0.2755905511811024, "grad_norm": 15.088367462158203, "learning_rate": 1.821522309711286e-05, "loss": 2.9588, "step": 70 }, { "epoch": 0.31496062992125984, "grad_norm": 13.963665008544922, "learning_rate": 1.7952755905511813e-05, "loss": 3.1075, "step": 80 }, { "epoch": 0.3543307086614173, "grad_norm": 15.01663589477539, "learning_rate": 1.7690288713910762e-05, "loss": 2.8811, "step": 90 }, { "epoch": 0.3937007874015748, "grad_norm": 22.79145622253418, "learning_rate": 1.7427821522309714e-05, "loss": 2.946, "step": 100 }, { "epoch": 0.4330708661417323, "grad_norm": 17.157787322998047, "learning_rate": 1.7165354330708663e-05, "loss": 2.7091, "step": 110 }, { "epoch": 0.47244094488188976, "grad_norm": 15.345536231994629, "learning_rate": 1.6902887139107613e-05, "loss": 2.8872, "step": 120 }, { "epoch": 0.5118110236220472, "grad_norm": 14.813170433044434, "learning_rate": 1.6640419947506562e-05, "loss": 2.7625, "step": 130 }, { "epoch": 0.5511811023622047, "grad_norm": 13.589457511901855, "learning_rate": 1.6377952755905514e-05, "loss": 2.6937, "step": 140 }, { "epoch": 0.5905511811023622, "grad_norm": 16.824535369873047, "learning_rate": 1.6115485564304463e-05, "loss": 3.1951, "step": 150 }, { "epoch": 0.6299212598425197, "grad_norm": 15.816085815429688, "learning_rate": 1.5853018372703412e-05, "loss": 2.7028, "step": 160 }, { "epoch": 0.6692913385826772, "grad_norm": 12.972024917602539, "learning_rate": 1.559055118110236e-05, "loss": 2.7171, "step": 170 }, { "epoch": 0.7086614173228346, "grad_norm": 15.205809593200684, "learning_rate": 1.5328083989501314e-05, "loss": 2.7386, "step": 180 }, { "epoch": 0.7480314960629921, "grad_norm": 15.404927253723145, "learning_rate": 1.5065616797900265e-05, "loss": 2.5878, "step": 190 }, { "epoch": 0.7874015748031497, "grad_norm": 14.278895378112793, "learning_rate": 1.4803149606299214e-05, "loss": 2.535, "step": 200 }, { "epoch": 0.8267716535433071, "grad_norm": 13.026575088500977, "learning_rate": 1.4540682414698165e-05, "loss": 2.7611, "step": 210 }, { "epoch": 0.8661417322834646, "grad_norm": 14.615636825561523, "learning_rate": 1.4278215223097114e-05, "loss": 2.4755, "step": 220 }, { "epoch": 0.905511811023622, "grad_norm": 15.044017791748047, "learning_rate": 1.4015748031496063e-05, "loss": 2.7637, "step": 230 }, { "epoch": 0.9448818897637795, "grad_norm": 15.503686904907227, "learning_rate": 1.3753280839895014e-05, "loss": 2.8203, "step": 240 }, { "epoch": 0.984251968503937, "grad_norm": 13.733269691467285, 
"learning_rate": 1.3490813648293963e-05, "loss": 2.4717, "step": 250 }, { "epoch": 1.0, "eval_loss": 2.6172032356262207, "eval_runtime": 1.7583, "eval_samples_per_second": 512.44, "eval_steps_per_second": 16.494, "step": 254 }, { "epoch": 1.0236220472440944, "grad_norm": 12.064373970031738, "learning_rate": 1.3228346456692915e-05, "loss": 2.6106, "step": 260 }, { "epoch": 1.0629921259842519, "grad_norm": 14.499472618103027, "learning_rate": 1.2965879265091864e-05, "loss": 2.5675, "step": 270 }, { "epoch": 1.1023622047244095, "grad_norm": 13.379920959472656, "learning_rate": 1.2703412073490815e-05, "loss": 2.3713, "step": 280 }, { "epoch": 1.141732283464567, "grad_norm": 13.791425704956055, "learning_rate": 1.2440944881889764e-05, "loss": 2.5336, "step": 290 }, { "epoch": 1.1811023622047245, "grad_norm": 13.244888305664062, "learning_rate": 1.2178477690288715e-05, "loss": 2.6234, "step": 300 }, { "epoch": 1.220472440944882, "grad_norm": 19.034948348999023, "learning_rate": 1.1916010498687664e-05, "loss": 2.5233, "step": 310 }, { "epoch": 1.2598425196850394, "grad_norm": 15.304523468017578, "learning_rate": 1.1653543307086615e-05, "loss": 2.7615, "step": 320 }, { "epoch": 1.2992125984251968, "grad_norm": 13.435171127319336, "learning_rate": 1.1391076115485564e-05, "loss": 2.6509, "step": 330 }, { "epoch": 1.3385826771653544, "grad_norm": 17.483469009399414, "learning_rate": 1.1128608923884517e-05, "loss": 2.5112, "step": 340 }, { "epoch": 1.3779527559055118, "grad_norm": 13.701622009277344, "learning_rate": 1.0866141732283466e-05, "loss": 2.4639, "step": 350 }, { "epoch": 1.4173228346456692, "grad_norm": 12.15971565246582, "learning_rate": 1.0603674540682417e-05, "loss": 2.3728, "step": 360 }, { "epoch": 1.4566929133858268, "grad_norm": 16.696950912475586, "learning_rate": 1.0341207349081366e-05, "loss": 2.6276, "step": 370 }, { "epoch": 1.4960629921259843, "grad_norm": 14.275362968444824, "learning_rate": 1.0078740157480316e-05, "loss": 2.2527, "step": 380 }, { "epoch": 1.5354330708661417, "grad_norm": 11.532317161560059, "learning_rate": 9.816272965879266e-06, "loss": 2.3952, "step": 390 }, { "epoch": 1.574803149606299, "grad_norm": 14.485555648803711, "learning_rate": 9.553805774278216e-06, "loss": 2.4501, "step": 400 }, { "epoch": 1.6141732283464567, "grad_norm": 12.941930770874023, "learning_rate": 9.291338582677165e-06, "loss": 2.5935, "step": 410 }, { "epoch": 1.6535433070866141, "grad_norm": 10.413193702697754, "learning_rate": 9.028871391076116e-06, "loss": 2.3981, "step": 420 }, { "epoch": 1.6929133858267718, "grad_norm": 13.350451469421387, "learning_rate": 8.766404199475065e-06, "loss": 2.5693, "step": 430 }, { "epoch": 1.7322834645669292, "grad_norm": 12.321857452392578, "learning_rate": 8.503937007874016e-06, "loss": 2.4089, "step": 440 }, { "epoch": 1.7716535433070866, "grad_norm": 17.121238708496094, "learning_rate": 8.241469816272967e-06, "loss": 2.2449, "step": 450 }, { "epoch": 1.811023622047244, "grad_norm": 17.085081100463867, "learning_rate": 7.979002624671916e-06, "loss": 2.5096, "step": 460 }, { "epoch": 1.8503937007874016, "grad_norm": 13.748458862304688, "learning_rate": 7.716535433070867e-06, "loss": 2.427, "step": 470 }, { "epoch": 1.889763779527559, "grad_norm": 14.016273498535156, "learning_rate": 7.454068241469818e-06, "loss": 2.2703, "step": 480 }, { "epoch": 1.9291338582677167, "grad_norm": 9.295332908630371, "learning_rate": 7.191601049868768e-06, "loss": 2.3936, "step": 490 }, { "epoch": 1.968503937007874, "grad_norm": 14.30349349975586, "learning_rate": 
6.929133858267717e-06, "loss": 2.603, "step": 500 } ], "logging_steps": 10, "max_steps": 762, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4206012908544000.0, "train_batch_size": 32, "trial_name": null, "trial_params": null }
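
For reference, a minimal Python sketch of how a state file like the one above could be loaded and summarised once saved to disk. It assumes the JSON is stored as "trainer_state.json" inside a checkpoint directory; the "checkpoint-500" path is hypothetical and only the standard-library json module is used.

    import json

    # Hypothetical path; point this at wherever the state file actually lives.
    with open("checkpoint-500/trainer_state.json") as f:
        state = json.load(f)

    # log_history mixes training logs (which carry "loss", "grad_norm" and
    # "learning_rate") with evaluation logs (which carry "eval_loss" etc.),
    # so split them before printing.
    train_logs = [e for e in state["log_history"] if "loss" in e and "eval_loss" not in e]
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

    for e in train_logs:
        print(f"step {e['step']:>4}  loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")
    for e in eval_logs:
        print(f"eval @ step {e['step']}: eval_loss {e['eval_loss']:.4f}")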