{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.6, "eval_steps": 500, "global_step": 100, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.08, "grad_norm": 0.19458943605422974, "learning_rate": 4.9910902453260824e-05, "loss": 0.8099, "num_input_tokens_seen": 114944, "step": 5 }, { "epoch": 0.16, "grad_norm": 0.23063905537128448, "learning_rate": 4.964424488287009e-05, "loss": 0.958, "num_input_tokens_seen": 227728, "step": 10 }, { "epoch": 0.24, "grad_norm": 0.22587424516677856, "learning_rate": 4.920192797165511e-05, "loss": 0.715, "num_input_tokens_seen": 326640, "step": 15 }, { "epoch": 0.32, "grad_norm": 0.17264235019683838, "learning_rate": 4.858710446774951e-05, "loss": 0.7585, "num_input_tokens_seen": 437280, "step": 20 }, { "epoch": 0.4, "grad_norm": 0.19875957071781158, "learning_rate": 4.780415671242334e-05, "loss": 0.7342, "num_input_tokens_seen": 530192, "step": 25 }, { "epoch": 0.48, "grad_norm": 0.36600446701049805, "learning_rate": 4.685866540361456e-05, "loss": 0.6904, "num_input_tokens_seen": 634960, "step": 30 }, { "epoch": 0.56, "grad_norm": 0.20120520889759064, "learning_rate": 4.5757369817809415e-05, "loss": 0.8254, "num_input_tokens_seen": 740704, "step": 35 }, { "epoch": 0.64, "grad_norm": 0.22245633602142334, "learning_rate": 4.45081197738023e-05, "loss": 0.7551, "num_input_tokens_seen": 841600, "step": 40 }, { "epoch": 0.72, "grad_norm": 0.27814680337905884, "learning_rate": 4.3119819680728e-05, "loss": 0.7747, "num_input_tokens_seen": 947648, "step": 45 }, { "epoch": 0.8, "grad_norm": 0.27052441239356995, "learning_rate": 4.160236506918098e-05, "loss": 0.7027, "num_input_tokens_seen": 1047520, "step": 50 }, { "epoch": 0.88, "grad_norm": 0.38937926292419434, "learning_rate": 3.9966572057815373e-05, "loss": 0.7581, "num_input_tokens_seen": 1148080, "step": 55 }, { "epoch": 0.96, "grad_norm": 0.37379005551338196, "learning_rate": 3.822410025817406e-05, "loss": 0.7221, "num_input_tokens_seen": 1252880, "step": 60 }, { "epoch": 1.04, "grad_norm": 0.34213003516197205, "learning_rate": 3.638736966726585e-05, "loss": 0.8214, "num_input_tokens_seen": 1364304, "step": 65 }, { "epoch": 1.12, "grad_norm": 0.33407434821128845, "learning_rate": 3.44694721402644e-05, "loss": 0.6304, "num_input_tokens_seen": 1466240, "step": 70 }, { "epoch": 1.2, "grad_norm": 0.33909159898757935, "learning_rate": 3.2484078074333954e-05, "loss": 0.6434, "num_input_tokens_seen": 1558752, "step": 75 }, { "epoch": 1.28, "grad_norm": 0.4054814577102661, "learning_rate": 3.0445338968721287e-05, "loss": 0.6796, "num_input_tokens_seen": 1668480, "step": 80 }, { "epoch": 1.3599999999999999, "grad_norm": 0.2723485231399536, "learning_rate": 2.836778655564653e-05, "loss": 0.6651, "num_input_tokens_seen": 1773264, "step": 85 }, { "epoch": 1.44, "grad_norm": 0.3467393219470978, "learning_rate": 2.6266229220967818e-05, "loss": 0.7844, "num_input_tokens_seen": 1885920, "step": 90 }, { "epoch": 1.52, "grad_norm": 0.4250239431858063, "learning_rate": 2.4155646452913296e-05, "loss": 0.8139, "num_input_tokens_seen": 1996656, "step": 95 }, { "epoch": 1.6, "grad_norm": 0.30656036734580994, "learning_rate": 2.2051082071228854e-05, "loss": 0.6717, "num_input_tokens_seen": 2102896, "step": 100 } ], "logging_steps": 5, "max_steps": 186, "num_input_tokens_seen": 2102896, "num_train_epochs": 3, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": 
false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.664954843267072e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }