{ "best_metric": null, "best_model_checkpoint": null, "epoch": 6.857142857142857, "eval_steps": 50.0, "global_step": 24, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.2857142857142857, "grad_norm": 1.2684494256973267, "learning_rate": 9.583333333333334e-05, "loss": 9.5898, "step": 1 }, { "epoch": 0.5714285714285714, "grad_norm": 1.1818389892578125, "learning_rate": 9.166666666666667e-05, "loss": 9.5011, "step": 2 }, { "epoch": 0.8571428571428571, "grad_norm": 1.2637732028961182, "learning_rate": 8.75e-05, "loss": 9.4039, "step": 3 }, { "epoch": 0.8571428571428571, "eval_clap": -0.006993885152041912, "eval_loss": 4.442792892456055, "eval_runtime": 152.7878, "eval_samples_per_second": 0.052, "eval_steps_per_second": 0.052, "step": 3 }, { "epoch": 1.1428571428571428, "grad_norm": 1.3919429779052734, "learning_rate": 8.333333333333334e-05, "loss": 9.2841, "step": 4 }, { "epoch": 1.4285714285714286, "grad_norm": 1.372936487197876, "learning_rate": 7.916666666666666e-05, "loss": 9.1687, "step": 5 }, { "epoch": 1.7142857142857144, "grad_norm": 1.4058455228805542, "learning_rate": 7.500000000000001e-05, "loss": 9.0566, "step": 6 }, { "epoch": 2.0, "grad_norm": 1.5498394966125488, "learning_rate": 7.083333333333334e-05, "loss": 8.8992, "step": 7 }, { "epoch": 2.0, "eval_clap": 0.011688459664583206, "eval_loss": 4.4434919357299805, "eval_runtime": 159.5674, "eval_samples_per_second": 0.05, "eval_steps_per_second": 0.05, "step": 7 }, { "epoch": 2.2857142857142856, "grad_norm": 1.9276645183563232, "learning_rate": 6.666666666666667e-05, "loss": 8.7901, "step": 8 }, { "epoch": 2.571428571428571, "grad_norm": 1.75799560546875, "learning_rate": 6.25e-05, "loss": 8.7254, "step": 9 }, { "epoch": 2.857142857142857, "grad_norm": 2.2315149307250977, "learning_rate": 5.833333333333334e-05, "loss": 8.5373, "step": 10 }, { "epoch": 2.857142857142857, "eval_clap": -0.02875398099422455, "eval_loss": 4.446320533752441, "eval_runtime": 162.9065, "eval_samples_per_second": 0.049, "eval_steps_per_second": 0.049, "step": 10 }, { "epoch": 3.142857142857143, "grad_norm": 2.4357290267944336, "learning_rate": 5.4166666666666664e-05, "loss": 8.4189, "step": 11 }, { "epoch": 3.4285714285714284, "grad_norm": 2.5207338333129883, "learning_rate": 5e-05, "loss": 8.2969, "step": 12 }, { "epoch": 3.7142857142857144, "grad_norm": 3.3077168464660645, "learning_rate": 4.5833333333333334e-05, "loss": 8.2279, "step": 13 }, { "epoch": 4.0, "grad_norm": 2.170753240585327, "learning_rate": 4.166666666666667e-05, "loss": 8.0105, "step": 14 }, { "epoch": 4.0, "eval_clap": -0.011095928959548473, "eval_loss": 4.455233573913574, "eval_runtime": 156.9405, "eval_samples_per_second": 0.051, "eval_steps_per_second": 0.051, "step": 14 }, { "epoch": 4.285714285714286, "grad_norm": 2.024324417114258, "learning_rate": 3.7500000000000003e-05, "loss": 7.9972, "step": 15 }, { "epoch": 4.571428571428571, "grad_norm": 1.7727833986282349, "learning_rate": 3.3333333333333335e-05, "loss": 7.9157, "step": 16 }, { "epoch": 4.857142857142857, "grad_norm": 1.7100613117218018, "learning_rate": 2.916666666666667e-05, "loss": 7.8068, "step": 17 }, { "epoch": 4.857142857142857, "eval_clap": -0.019424768164753914, "eval_loss": 4.468198776245117, "eval_runtime": 150.9644, "eval_samples_per_second": 0.053, "eval_steps_per_second": 0.053, "step": 17 }, { "epoch": 5.142857142857143, "grad_norm": 1.5242968797683716, "learning_rate": 2.5e-05, "loss": 7.7691, "step": 18 }, { "epoch": 
5.428571428571429, "grad_norm": 1.3554269075393677, "learning_rate": 2.0833333333333336e-05, "loss": 7.7686, "step": 19 }, { "epoch": 5.714285714285714, "grad_norm": 1.5143285989761353, "learning_rate": 1.6666666666666667e-05, "loss": 7.7224, "step": 20 }, { "epoch": 6.0, "grad_norm": 1.2512257099151611, "learning_rate": 1.25e-05, "loss": 7.6881, "step": 21 }, { "epoch": 6.0, "eval_clap": 0.007096702232956886, "eval_loss": 4.4912824630737305, "eval_runtime": 151.9292, "eval_samples_per_second": 0.053, "eval_steps_per_second": 0.053, "step": 21 }, { "epoch": 6.285714285714286, "grad_norm": 1.1607741117477417, "learning_rate": 8.333333333333334e-06, "loss": 7.6731, "step": 22 }, { "epoch": 6.571428571428571, "grad_norm": 1.4850616455078125, "learning_rate": 4.166666666666667e-06, "loss": 7.6376, "step": 23 }, { "epoch": 6.857142857142857, "grad_norm": 1.2155146598815918, "learning_rate": 0.0, "loss": 7.6672, "step": 24 }, { "epoch": 6.857142857142857, "eval_clap": -0.02221839316189289, "eval_loss": 4.501054763793945, "eval_runtime": 152.1978, "eval_samples_per_second": 0.053, "eval_steps_per_second": 0.053, "step": 24 }, { "epoch": 6.857142857142857, "step": 24, "total_flos": 10310332824144.0, "train_loss": 8.39816782871882, "train_runtime": 1346.8351, "train_samples_per_second": 0.321, "train_steps_per_second": 0.018 } ], "logging_steps": 1.0, "max_steps": 24, "num_input_tokens_seen": 0, "num_train_epochs": 8, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 10310332824144.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }