{ "best_metric": null, "best_model_checkpoint": null, "epoch": 6.956521739130435, "eval_steps": 500, "global_step": 40, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.17391304347826086, "grad_norm": 0.020676065236330032, "learning_rate": 0.0001996917333733128, "loss": 1.4129, "num_input_tokens_seen": 16384, "step": 1 }, { "epoch": 0.34782608695652173, "grad_norm": 0.019586477428674698, "learning_rate": 0.00019876883405951377, "loss": 1.3486, "num_input_tokens_seen": 32768, "step": 2 }, { "epoch": 0.5217391304347826, "grad_norm": 0.01925278641283512, "learning_rate": 0.00019723699203976766, "loss": 1.394, "num_input_tokens_seen": 49152, "step": 3 }, { "epoch": 0.6956521739130435, "grad_norm": 0.018114449456334114, "learning_rate": 0.00019510565162951537, "loss": 1.3051, "num_input_tokens_seen": 65536, "step": 4 }, { "epoch": 0.8695652173913043, "grad_norm": 0.0193056408315897, "learning_rate": 0.0001923879532511287, "loss": 1.3236, "num_input_tokens_seen": 81920, "step": 5 }, { "epoch": 1.0434782608695652, "grad_norm": 0.021124837920069695, "learning_rate": 0.0001891006524188368, "loss": 1.4556, "num_input_tokens_seen": 98304, "step": 6 }, { "epoch": 1.2173913043478262, "grad_norm": 0.02479449287056923, "learning_rate": 0.00018526401643540922, "loss": 1.3086, "num_input_tokens_seen": 114688, "step": 7 }, { "epoch": 1.391304347826087, "grad_norm": 0.029818333685398102, "learning_rate": 0.00018090169943749476, "loss": 1.1029, "num_input_tokens_seen": 131072, "step": 8 }, { "epoch": 1.5652173913043477, "grad_norm": 0.02648150734603405, "learning_rate": 0.0001760405965600031, "loss": 1.2992, "num_input_tokens_seen": 147456, "step": 9 }, { "epoch": 1.7391304347826086, "grad_norm": 0.029215380549430847, "learning_rate": 0.00017071067811865476, "loss": 1.1914, "num_input_tokens_seen": 163840, "step": 10 }, { "epoch": 1.9130434782608696, "grad_norm": 0.028632890433073044, "learning_rate": 0.00016494480483301836, "loss": 1.2417, "num_input_tokens_seen": 180224, "step": 11 }, { "epoch": 2.0869565217391304, "grad_norm": 0.030310342088341713, "learning_rate": 0.00015877852522924732, "loss": 1.3909, "num_input_tokens_seen": 196608, "step": 12 }, { "epoch": 2.260869565217391, "grad_norm": 0.031336575746536255, "learning_rate": 0.0001522498564715949, "loss": 1.1383, "num_input_tokens_seen": 212992, "step": 13 }, { "epoch": 2.4347826086956523, "grad_norm": 0.034497059881687164, "learning_rate": 0.00014539904997395468, "loss": 1.2009, "num_input_tokens_seen": 229376, "step": 14 }, { "epoch": 2.608695652173913, "grad_norm": 0.03379025310277939, "learning_rate": 0.000138268343236509, "loss": 1.1918, "num_input_tokens_seen": 245760, "step": 15 }, { "epoch": 2.782608695652174, "grad_norm": 0.03916629031300545, "learning_rate": 0.00013090169943749476, "loss": 1.0642, "num_input_tokens_seen": 262144, "step": 16 }, { "epoch": 2.9565217391304346, "grad_norm": 0.042235296219587326, "learning_rate": 0.00012334453638559057, "loss": 1.2661, "num_input_tokens_seen": 278528, "step": 17 }, { "epoch": 3.130434782608696, "grad_norm": 0.03873530402779579, "learning_rate": 0.0001156434465040231, "loss": 1.0416, "num_input_tokens_seen": 294912, "step": 18 }, { "epoch": 3.3043478260869565, "grad_norm": 0.04076506569981575, "learning_rate": 0.0001078459095727845, "loss": 1.0564, "num_input_tokens_seen": 311296, "step": 19 }, { "epoch": 3.4782608695652173, "grad_norm": 0.03801592066884041, "learning_rate": 0.0001, "loss": 0.9894, "num_input_tokens_seen": 
327680, "step": 20 }, { "epoch": 3.6521739130434785, "grad_norm": 0.04381837695837021, "learning_rate": 9.215409042721552e-05, "loss": 1.0994, "num_input_tokens_seen": 344064, "step": 21 }, { "epoch": 3.8260869565217392, "grad_norm": 0.0512906014919281, "learning_rate": 8.435655349597689e-05, "loss": 1.3105, "num_input_tokens_seen": 360448, "step": 22 }, { "epoch": 4.0, "grad_norm": 0.04802567511796951, "learning_rate": 7.66554636144095e-05, "loss": 1.0337, "num_input_tokens_seen": 376832, "step": 23 }, { "epoch": 4.173913043478261, "grad_norm": 0.04690830409526825, "learning_rate": 6.909830056250527e-05, "loss": 1.0329, "num_input_tokens_seen": 393216, "step": 24 }, { "epoch": 4.3478260869565215, "grad_norm": 0.042034752666950226, "learning_rate": 6.173165676349103e-05, "loss": 0.9724, "num_input_tokens_seen": 409600, "step": 25 }, { "epoch": 4.521739130434782, "grad_norm": 0.04824018105864525, "learning_rate": 5.4600950026045326e-05, "loss": 1.1225, "num_input_tokens_seen": 425984, "step": 26 }, { "epoch": 4.695652173913043, "grad_norm": 0.049554552882909775, "learning_rate": 4.7750143528405126e-05, "loss": 0.95, "num_input_tokens_seen": 442368, "step": 27 }, { "epoch": 4.869565217391305, "grad_norm": 0.04718406870961189, "learning_rate": 4.12214747707527e-05, "loss": 1.0672, "num_input_tokens_seen": 458752, "step": 28 }, { "epoch": 5.043478260869565, "grad_norm": 0.05403953045606613, "learning_rate": 3.5055195166981645e-05, "loss": 0.997, "num_input_tokens_seen": 475136, "step": 29 }, { "epoch": 5.217391304347826, "grad_norm": 0.05253535881638527, "learning_rate": 2.9289321881345254e-05, "loss": 0.8303, "num_input_tokens_seen": 491520, "step": 30 }, { "epoch": 5.391304347826087, "grad_norm": 0.0478719137609005, "learning_rate": 2.3959403439996907e-05, "loss": 0.8613, "num_input_tokens_seen": 507904, "step": 31 }, { "epoch": 5.565217391304348, "grad_norm": 0.05444710701704025, "learning_rate": 1.9098300562505266e-05, "loss": 0.8735, "num_input_tokens_seen": 524288, "step": 32 }, { "epoch": 5.739130434782608, "grad_norm": 0.050455138087272644, "learning_rate": 1.4735983564590783e-05, "loss": 1.1102, "num_input_tokens_seen": 540672, "step": 33 }, { "epoch": 5.913043478260869, "grad_norm": 0.056231990456581116, "learning_rate": 1.0899347581163221e-05, "loss": 1.2143, "num_input_tokens_seen": 557056, "step": 34 }, { "epoch": 6.086956521739131, "grad_norm": 0.056390367448329926, "learning_rate": 7.612046748871327e-06, "loss": 1.1449, "num_input_tokens_seen": 573440, "step": 35 }, { "epoch": 6.260869565217392, "grad_norm": 0.053139008581638336, "learning_rate": 4.8943483704846475e-06, "loss": 1.0676, "num_input_tokens_seen": 589824, "step": 36 }, { "epoch": 6.434782608695652, "grad_norm": 0.0504545122385025, "learning_rate": 2.7630079602323442e-06, "loss": 0.981, "num_input_tokens_seen": 606208, "step": 37 }, { "epoch": 6.608695652173913, "grad_norm": 0.051228489726781845, "learning_rate": 1.231165940486234e-06, "loss": 0.8797, "num_input_tokens_seen": 622592, "step": 38 }, { "epoch": 6.782608695652174, "grad_norm": 0.05340878665447235, "learning_rate": 3.0826662668720364e-07, "loss": 0.8676, "num_input_tokens_seen": 638976, "step": 39 }, { "epoch": 6.956521739130435, "grad_norm": 0.0532316230237484, "learning_rate": 0.0, "loss": 1.0274, "num_input_tokens_seen": 655360, "step": 40 }, { "epoch": 6.956521739130435, "num_input_tokens_seen": 655360, "step": 40, "total_flos": 3.034267760197632e+16, "train_loss": 1.1291589722037316, "train_runtime": 509.8753, "train_samples_per_second": 0.722, 
"train_steps_per_second": 0.078 } ], "logging_steps": 1, "max_steps": 40, "num_input_tokens_seen": 655360, "num_train_epochs": 8, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": false, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.034267760197632e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }