{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1332,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0903954802259887,
      "grad_norm": 1.8916133642196655,
      "learning_rate": 9.5e-05,
      "loss": 5.9949,
      "step": 20
    },
    {
      "epoch": 0.1807909604519774,
      "grad_norm": 2.2982795238494873,
      "learning_rate": 9.913636363636364e-05,
      "loss": 5.7733,
      "step": 40
    },
    {
      "epoch": 0.2711864406779661,
      "grad_norm": 1.444909930229187,
      "learning_rate": 9.822727272727274e-05,
      "loss": 5.7282,
      "step": 60
    },
    {
      "epoch": 0.3615819209039548,
      "grad_norm": 2.519259214401245,
      "learning_rate": 9.731818181818181e-05,
      "loss": 5.6798,
      "step": 80
    },
    {
      "epoch": 0.4519774011299435,
      "grad_norm": 1.8974443674087524,
      "learning_rate": 9.640909090909092e-05,
      "loss": 5.6396,
      "step": 100
    },
    {
      "epoch": 0.5423728813559322,
      "grad_norm": 1.7946267127990723,
      "learning_rate": 9.55e-05,
      "loss": 5.6075,
      "step": 120
    },
    {
      "epoch": 0.632768361581921,
      "grad_norm": 1.672841191291809,
      "learning_rate": 9.459090909090909e-05,
      "loss": 5.6705,
      "step": 140
    },
    {
      "epoch": 0.7231638418079096,
      "grad_norm": 1.7475985288619995,
      "learning_rate": 9.368181818181818e-05,
      "loss": 5.5931,
      "step": 160
    },
    {
      "epoch": 0.8135593220338984,
      "grad_norm": 1.6054518222808838,
      "learning_rate": 9.277272727272728e-05,
      "loss": 5.543,
      "step": 180
    },
    {
      "epoch": 0.903954802259887,
      "grad_norm": 1.4981119632720947,
      "learning_rate": 9.186363636363636e-05,
      "loss": 5.5685,
      "step": 200
    },
    {
      "epoch": 0.9943502824858758,
      "grad_norm": 2.0669379234313965,
      "learning_rate": 9.095454545454546e-05,
      "loss": 5.6052,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.506160736083984,
      "eval_runtime": 24.9734,
      "eval_samples_per_second": 3.964,
      "eval_steps_per_second": 3.964,
      "step": 222
    },
    {
      "epoch": 1.0813559322033899,
      "grad_norm": 1.6721880435943604,
      "learning_rate": 9.004545454545455e-05,
      "loss": 4.9631,
      "step": 240
    },
    {
      "epoch": 1.1717514124293786,
      "grad_norm": 1.9324918985366821,
      "learning_rate": 8.913636363636364e-05,
      "loss": 4.8356,
      "step": 260
    },
    {
      "epoch": 1.2621468926553672,
      "grad_norm": 1.4607415199279785,
      "learning_rate": 8.822727272727274e-05,
      "loss": 4.8845,
      "step": 280
    },
    {
      "epoch": 1.352542372881356,
      "grad_norm": 1.8261921405792236,
      "learning_rate": 8.731818181818183e-05,
      "loss": 4.8498,
      "step": 300
    },
    {
      "epoch": 1.4429378531073447,
      "grad_norm": 1.818604588508606,
      "learning_rate": 8.640909090909092e-05,
      "loss": 4.7645,
      "step": 320
    },
    {
      "epoch": 1.5333333333333332,
      "grad_norm": 1.6225180625915527,
      "learning_rate": 8.55e-05,
      "loss": 4.8982,
      "step": 340
    },
    {
      "epoch": 1.623728813559322,
      "grad_norm": 1.8790407180786133,
      "learning_rate": 8.45909090909091e-05,
      "loss": 4.8945,
      "step": 360
    },
    {
      "epoch": 1.7141242937853107,
      "grad_norm": 1.3966808319091797,
      "learning_rate": 8.368181818181818e-05,
      "loss": 4.8538,
      "step": 380
    },
    {
      "epoch": 1.8045197740112995,
      "grad_norm": 1.648871898651123,
      "learning_rate": 8.277272727272728e-05,
      "loss": 4.8468,
      "step": 400
    },
    {
      "epoch": 1.8949152542372882,
      "grad_norm": 1.3894695043563843,
      "learning_rate": 8.186363636363636e-05,
      "loss": 4.906,
      "step": 420
    },
    {
      "epoch": 1.985310734463277,
      "grad_norm": 1.8563967943191528,
      "learning_rate": 8.095454545454546e-05,
      "loss": 4.9646,
      "step": 440
    },
    {
      "epoch": 2.0,
      "eval_loss": 5.620055675506592,
      "eval_runtime": 24.422,
      "eval_samples_per_second": 4.054,
      "eval_steps_per_second": 4.054,
      "step": 444
    },
    {
      "epoch": 2.072316384180791,
      "grad_norm": 1.446750521659851,
      "learning_rate": 8.004545454545455e-05,
      "loss": 4.3823,
      "step": 460
    },
    {
      "epoch": 2.1627118644067798,
      "grad_norm": 1.1743519306182861,
      "learning_rate": 7.913636363636364e-05,
      "loss": 4.2338,
      "step": 480
    },
    {
      "epoch": 2.2531073446327685,
      "grad_norm": 1.5246398448944092,
      "learning_rate": 7.822727272727273e-05,
      "loss": 4.2925,
      "step": 500
    },
    {
      "epoch": 2.3435028248587573,
      "grad_norm": 1.6647032499313354,
      "learning_rate": 7.731818181818183e-05,
      "loss": 4.2874,
      "step": 520
    },
    {
      "epoch": 2.4338983050847456,
      "grad_norm": 1.7699276208877563,
      "learning_rate": 7.64090909090909e-05,
      "loss": 4.2792,
      "step": 540
    },
    {
      "epoch": 2.5242937853107343,
      "grad_norm": 1.5570639371871948,
      "learning_rate": 7.55e-05,
      "loss": 4.3313,
      "step": 560
    },
    {
      "epoch": 2.614689265536723,
      "grad_norm": 1.5719196796417236,
      "learning_rate": 7.45909090909091e-05,
      "loss": 4.2433,
      "step": 580
    },
    {
      "epoch": 2.705084745762712,
      "grad_norm": 1.3264689445495605,
      "learning_rate": 7.368181818181818e-05,
      "loss": 4.2903,
      "step": 600
    },
    {
      "epoch": 2.7954802259887006,
      "grad_norm": 1.7016714811325073,
      "learning_rate": 7.277272727272728e-05,
      "loss": 4.3354,
      "step": 620
    },
    {
      "epoch": 2.8858757062146894,
      "grad_norm": 2.0133845806121826,
      "learning_rate": 7.186363636363636e-05,
      "loss": 4.338,
      "step": 640
    },
    {
      "epoch": 2.976271186440678,
      "grad_norm": 1.3552452325820923,
      "learning_rate": 7.095454545454546e-05,
      "loss": 4.3492,
      "step": 660
    },
    {
      "epoch": 3.0,
      "eval_loss": 5.867856502532959,
      "eval_runtime": 23.5513,
      "eval_samples_per_second": 4.204,
      "eval_steps_per_second": 4.204,
      "step": 666
    },
    {
      "epoch": 3.063276836158192,
      "grad_norm": 1.0316146612167358,
      "learning_rate": 7.004545454545455e-05,
      "loss": 4.0605,
      "step": 680
    },
    {
      "epoch": 3.153672316384181,
      "grad_norm": 1.5106645822525024,
      "learning_rate": 6.913636363636364e-05,
      "loss": 3.8914,
      "step": 700
    },
    {
      "epoch": 3.2440677966101696,
      "grad_norm": 1.211243748664856,
      "learning_rate": 6.822727272727273e-05,
      "loss": 3.9177,
      "step": 720
    },
    {
      "epoch": 3.334463276836158,
      "grad_norm": 1.2083691358566284,
      "learning_rate": 6.731818181818183e-05,
      "loss": 3.9629,
      "step": 740
    },
    {
      "epoch": 3.4248587570621467,
      "grad_norm": 1.6045001745224,
      "learning_rate": 6.64090909090909e-05,
      "loss": 3.9449,
      "step": 760
    },
    {
      "epoch": 3.5152542372881355,
      "grad_norm": 1.8037244081497192,
      "learning_rate": 6.55e-05,
      "loss": 3.9325,
      "step": 780
    },
    {
      "epoch": 3.605649717514124,
      "grad_norm": 1.5021991729736328,
      "learning_rate": 6.45909090909091e-05,
      "loss": 3.9516,
      "step": 800
    },
    {
      "epoch": 3.696045197740113,
      "grad_norm": 1.5164258480072021,
      "learning_rate": 6.368181818181818e-05,
      "loss": 3.9408,
      "step": 820
    },
    {
      "epoch": 3.7864406779661017,
      "grad_norm": 1.5084543228149414,
      "learning_rate": 6.277272727272727e-05,
      "loss": 3.9545,
      "step": 840
    },
    {
      "epoch": 3.8768361581920905,
      "grad_norm": 1.895159363746643,
      "learning_rate": 6.186363636363637e-05,
      "loss": 3.9951,
      "step": 860
    },
    {
      "epoch": 3.9672316384180792,
      "grad_norm": 1.7579407691955566,
      "learning_rate": 6.0954545454545456e-05,
      "loss": 3.9633,
      "step": 880
    },
    {
      "epoch": 4.0,
      "eval_loss": 6.0311665534973145,
      "eval_runtime": 23.8205,
      "eval_samples_per_second": 4.156,
      "eval_steps_per_second": 4.156,
      "step": 888
    },
    {
      "epoch": 4.054237288135593,
      "grad_norm": 1.2332321405410767,
      "learning_rate": 6.004545454545455e-05,
      "loss": 3.7544,
      "step": 900
    },
    {
      "epoch": 4.144632768361582,
      "grad_norm": 1.4505114555358887,
      "learning_rate": 5.913636363636363e-05,
      "loss": 3.6543,
      "step": 920
    },
    {
      "epoch": 4.235028248587571,
      "grad_norm": 1.600288987159729,
      "learning_rate": 5.822727272727273e-05,
      "loss": 3.617,
      "step": 940
    },
    {
      "epoch": 4.3254237288135595,
      "grad_norm": 1.30964994430542,
      "learning_rate": 5.7318181818181824e-05,
      "loss": 3.605,
      "step": 960
    },
    {
      "epoch": 4.415819209039548,
      "grad_norm": 1.263311505317688,
      "learning_rate": 5.640909090909091e-05,
      "loss": 3.6401,
      "step": 980
    },
    {
      "epoch": 4.506214689265537,
      "grad_norm": 1.4733268022537231,
      "learning_rate": 5.550000000000001e-05,
      "loss": 3.6869,
      "step": 1000
    },
    {
      "epoch": 4.596610169491526,
      "grad_norm": 1.2877130508422852,
      "learning_rate": 5.4590909090909096e-05,
      "loss": 3.661,
      "step": 1020
    },
    {
      "epoch": 4.6870056497175145,
      "grad_norm": 2.0695672035217285,
      "learning_rate": 5.368181818181819e-05,
      "loss": 3.6567,
      "step": 1040
    },
    {
      "epoch": 4.777401129943502,
      "grad_norm": 1.6412814855575562,
      "learning_rate": 5.277272727272727e-05,
      "loss": 3.7096,
      "step": 1060
    },
    {
      "epoch": 4.867796610169491,
      "grad_norm": 1.3960940837860107,
      "learning_rate": 5.186363636363637e-05,
      "loss": 3.6971,
      "step": 1080
    },
    {
      "epoch": 4.95819209039548,
      "grad_norm": 1.1688611507415771,
      "learning_rate": 5.095454545454546e-05,
      "loss": 3.638,
      "step": 1100
    },
    {
      "epoch": 5.0,
      "eval_loss": 6.19950532913208,
      "eval_runtime": 24.1889,
      "eval_samples_per_second": 4.093,
      "eval_steps_per_second": 4.093,
      "step": 1110
    },
    {
      "epoch": 5.045197740112994,
      "grad_norm": 1.2310229539871216,
      "learning_rate": 5.004545454545455e-05,
      "loss": 3.4653,
      "step": 1120
    },
    {
      "epoch": 5.135593220338983,
      "grad_norm": 1.0305955410003662,
      "learning_rate": 4.913636363636364e-05,
      "loss": 3.3167,
      "step": 1140
    },
    {
      "epoch": 5.2259887005649714,
      "grad_norm": 1.1152170896530151,
      "learning_rate": 4.822727272727273e-05,
      "loss": 3.3735,
      "step": 1160
    },
    {
      "epoch": 5.31638418079096,
      "grad_norm": 1.3132717609405518,
      "learning_rate": 4.7318181818181824e-05,
      "loss": 3.4021,
      "step": 1180
    },
    {
      "epoch": 5.406779661016949,
      "grad_norm": 1.0768282413482666,
      "learning_rate": 4.640909090909091e-05,
      "loss": 3.3842,
      "step": 1200
    },
    {
      "epoch": 5.497175141242938,
      "grad_norm": 1.283962607383728,
      "learning_rate": 4.55e-05,
      "loss": 3.3741,
      "step": 1220
    },
    {
      "epoch": 5.5875706214689265,
      "grad_norm": 1.479189395904541,
      "learning_rate": 4.45909090909091e-05,
      "loss": 3.4405,
      "step": 1240
    },
    {
      "epoch": 5.677966101694915,
      "grad_norm": 1.7963414192199707,
      "learning_rate": 4.3681818181818185e-05,
      "loss": 3.3677,
      "step": 1260
    },
    {
      "epoch": 5.768361581920904,
      "grad_norm": 1.2746598720550537,
      "learning_rate": 4.2772727272727274e-05,
      "loss": 3.4565,
      "step": 1280
    },
    {
      "epoch": 5.858757062146893,
      "grad_norm": 1.3244794607162476,
      "learning_rate": 4.186363636363637e-05,
      "loss": 3.4152,
      "step": 1300
    },
    {
      "epoch": 5.9491525423728815,
      "grad_norm": 1.5454453229904175,
      "learning_rate": 4.095454545454546e-05,
      "loss": 3.4649,
      "step": 1320
    },
    {
      "epoch": 6.0,
      "eval_loss": 6.328514575958252,
      "eval_runtime": 24.2123,
      "eval_samples_per_second": 4.089,
      "eval_steps_per_second": 4.089,
      "step": 1332
    }
  ],
  "logging_steps": 20,
  "max_steps": 2220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3980325845764800.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}