{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9985185185185185,
  "eval_steps": 200,
  "global_step": 337,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02962962962962963,
      "grad_norm": 2.5700944579076386,
      "learning_rate": 2.647058823529412e-06,
      "loss": 0.0501,
      "step": 10
    },
    {
      "epoch": 0.05925925925925926,
      "grad_norm": 0.5048094009511486,
      "learning_rate": 5.588235294117647e-06,
      "loss": 0.0069,
      "step": 20
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.19758011909001763,
      "learning_rate": 8.529411764705883e-06,
      "loss": 0.003,
      "step": 30
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 0.23020557863189864,
      "learning_rate": 9.993282661430058e-06,
      "loss": 0.0021,
      "step": 40
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.2573589170265553,
      "learning_rate": 9.939652198703785e-06,
      "loss": 0.0018,
      "step": 50
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.08729971947390419,
      "learning_rate": 9.83296729242084e-06,
      "loss": 0.0011,
      "step": 60
    },
    {
      "epoch": 0.2074074074074074,
      "grad_norm": 0.061745481061511,
      "learning_rate": 9.674373794173818e-06,
      "loss": 0.0014,
      "step": 70
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 0.06325390589424902,
      "learning_rate": 9.465575080933959e-06,
      "loss": 0.0009,
      "step": 80
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.08158926168862335,
      "learning_rate": 9.208813759893158e-06,
      "loss": 0.0013,
      "step": 90
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.17412238400373709,
      "learning_rate": 8.90684758169189e-06,
      "loss": 0.0012,
      "step": 100
    },
    {
      "epoch": 0.32592592592592595,
      "grad_norm": 0.1603433450392654,
      "learning_rate": 8.562919820737537e-06,
      "loss": 0.0013,
      "step": 110
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.055039229645190126,
      "learning_rate": 8.180724440743957e-06,
      "loss": 0.0016,
      "step": 120
    },
    {
      "epoch": 0.3851851851851852,
      "grad_norm": 0.08069486692576644,
      "learning_rate": 7.764366419632636e-06,
      "loss": 0.0011,
      "step": 130
    },
    {
      "epoch": 0.4148148148148148,
      "grad_norm": 0.0738569460148035,
      "learning_rate": 7.318317659926637e-06,
      "loss": 0.0016,
      "step": 140
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.058126643666483985,
      "learning_rate": 6.8473689581827585e-06,
      "loss": 0.001,
      "step": 150
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.05719176786796181,
      "learning_rate": 6.356578549335295e-06,
      "loss": 0.001,
      "step": 160
    },
    {
      "epoch": 0.5037037037037037,
      "grad_norm": 0.08461235331930808,
      "learning_rate": 5.851217778611994e-06,
      "loss": 0.0009,
      "step": 170
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.08573440961052009,
      "learning_rate": 5.336714484534183e-06,
      "loss": 0.0009,
      "step": 180
    },
    {
      "epoch": 0.562962962962963,
      "grad_norm": 0.051441087691438556,
      "learning_rate": 4.818594701097239e-06,
      "loss": 0.0014,
      "step": 190
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.08575414629435474,
      "learning_rate": 4.3024233052803855e-06,
      "loss": 0.0009,
      "step": 200
    },
    {
      "epoch": 0.5925925925925926,
      "eval_loss": 0.0009050487424246967,
      "eval_runtime": 55.0973,
      "eval_samples_per_second": 10.89,
      "eval_steps_per_second": 2.722,
      "step": 200
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 0.04271952284889459,
      "learning_rate": 3.7937442473625787e-06,
      "loss": 0.001,
      "step": 210
    },
    {
      "epoch": 0.6518518518518519,
      "grad_norm": 0.07497511504373228,
      "learning_rate": 3.29802100600206e-06,
      "loss": 0.001,
      "step": 220
    },
    {
      "epoch": 0.6814814814814815,
      "grad_norm": 0.07629854288403448,
      "learning_rate": 2.820577907623145e-06,
      "loss": 0.0009,
      "step": 230
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.055976860309317575,
      "learning_rate": 2.3665429403706506e-06,
      "loss": 0.0007,
      "step": 240
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.07925297562080282,
      "learning_rate": 1.9407926768399456e-06,
      "loss": 0.0008,
      "step": 250
    },
    {
      "epoch": 0.7703703703703704,
      "grad_norm": 0.1915028212124568,
      "learning_rate": 1.5478998971412669e-06,
      "loss": 0.0006,
      "step": 260
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.1626955978713893,
      "learning_rate": 1.1920844748539373e-06,
      "loss": 0.0008,
      "step": 270
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.20206732764086238,
      "learning_rate": 8.771680533809634e-07,
      "loss": 0.0013,
      "step": 280
    },
    {
      "epoch": 0.8592592592592593,
      "grad_norm": 0.07520440354893004,
      "learning_rate": 6.065329995036573e-07,
      "loss": 0.0011,
      "step": 290
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.04748220014060114,
      "learning_rate": 3.8308607499648765e-07,
      "loss": 0.0008,
      "step": 300
    },
    {
      "epoch": 0.9185185185185185,
      "grad_norm": 0.0296088345256548,
      "learning_rate": 2.0922721648805045e-07,
      "loss": 0.0005,
      "step": 310
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.07908790856885718,
      "learning_rate": 8.682375888868167e-08,
      "loss": 0.0008,
      "step": 320
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 0.08879302978714725,
      "learning_rate": 1.7190379238609666e-08,
      "loss": 0.0008,
      "step": 330
    },
    {
      "epoch": 0.9985185185185185,
      "step": 337,
      "total_flos": 29848581832704.0,
      "train_loss": 0.0027619419475735474,
      "train_runtime": 2459.1521,
      "train_samples_per_second": 2.196,
      "train_steps_per_second": 0.137
    }
  ],
  "logging_steps": 10,
  "max_steps": 337,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 29848581832704.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}