{
  "best_global_step": 500,
  "best_metric": 0.21088281273841858,
  "best_model_checkpoint": "./qwen2-7b-math-coder/checkpoint-500",
  "epoch": 0.22904260192395787,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004580852038479157,
      "grad_norm": 3.668745756149292,
      "learning_rate": 4.5e-06,
      "loss": 2.0337,
      "step": 10
    },
    {
      "epoch": 0.009161704076958314,
      "grad_norm": 3.277991533279419,
      "learning_rate": 9.5e-06,
      "loss": 1.7448,
      "step": 20
    },
    {
      "epoch": 0.013742556115437472,
      "grad_norm": 2.6093215942382812,
      "learning_rate": 1.45e-05,
      "loss": 1.2869,
      "step": 30
    },
    {
      "epoch": 0.01832340815391663,
      "grad_norm": 1.7964814901351929,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 0.9077,
      "step": 40
    },
    {
      "epoch": 0.022904260192395786,
      "grad_norm": 1.5602203607559204,
      "learning_rate": 2.45e-05,
      "loss": 0.5228,
      "step": 50
    },
    {
      "epoch": 0.027485112230874943,
      "grad_norm": 2.1516997814178467,
      "learning_rate": 2.95e-05,
      "loss": 0.3872,
      "step": 60
    },
    {
      "epoch": 0.0320659642693541,
      "grad_norm": 1.4016958475112915,
      "learning_rate": 3.45e-05,
      "loss": 0.3371,
      "step": 70
    },
    {
      "epoch": 0.03664681630783326,
      "grad_norm": 1.5028544664382935,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.31,
      "step": 80
    },
    {
      "epoch": 0.04122766834631241,
      "grad_norm": 1.7520164251327515,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.2495,
      "step": 90
    },
    {
      "epoch": 0.04580852038479157,
      "grad_norm": 0.7592225074768066,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.238,
      "step": 100
    },
    {
      "epoch": 0.050389372423270726,
      "grad_norm": 1.602748990058899,
      "learning_rate": 4.978396543446952e-05,
      "loss": 0.2472,
      "step": 110
    },
    {
      "epoch": 0.054970224461749886,
      "grad_norm": 0.8727825880050659,
      "learning_rate": 4.954392702832453e-05,
      "loss": 0.2455,
      "step": 120
    },
    {
      "epoch": 0.05955107650022904,
      "grad_norm": 0.8755759596824646,
      "learning_rate": 4.930388862217955e-05,
      "loss": 0.2514,
      "step": 130
    },
    {
      "epoch": 0.0641319285387082,
      "grad_norm": 1.3854660987854004,
      "learning_rate": 4.906385021603457e-05,
      "loss": 0.2415,
      "step": 140
    },
    {
      "epoch": 0.06871278057718735,
      "grad_norm": 0.6988345384597778,
      "learning_rate": 4.8823811809889585e-05,
      "loss": 0.248,
      "step": 150
    },
    {
      "epoch": 0.07329363261566652,
      "grad_norm": 0.7716445326805115,
      "learning_rate": 4.8583773403744604e-05,
      "loss": 0.2289,
      "step": 160
    },
    {
      "epoch": 0.07787448465414568,
      "grad_norm": 0.618770956993103,
      "learning_rate": 4.8343734997599615e-05,
      "loss": 0.2631,
      "step": 170
    },
    {
      "epoch": 0.08245533669262482,
      "grad_norm": 0.311239093542099,
      "learning_rate": 4.8103696591454634e-05,
      "loss": 0.2291,
      "step": 180
    },
    {
      "epoch": 0.08703618873110398,
      "grad_norm": 0.6665637493133545,
      "learning_rate": 4.786365818530965e-05,
      "loss": 0.2227,
      "step": 190
    },
    {
      "epoch": 0.09161704076958314,
      "grad_norm": 0.7171844840049744,
      "learning_rate": 4.762361977916467e-05,
      "loss": 0.2291,
      "step": 200
    },
    {
      "epoch": 0.0961978928080623,
      "grad_norm": 0.606952965259552,
      "learning_rate": 4.738358137301969e-05,
      "loss": 0.2496,
      "step": 210
    },
    {
      "epoch": 0.10077874484654145,
      "grad_norm": 0.6014417409896851,
      "learning_rate": 4.71435429668747e-05,
      "loss": 0.217,
      "step": 220
    },
    {
      "epoch": 0.10535959688502061,
      "grad_norm": 0.5063177943229675,
      "learning_rate": 4.690350456072972e-05,
      "loss": 0.2155,
      "step": 230
    },
    {
      "epoch": 0.10994044892349977,
      "grad_norm": 0.4963516294956207,
      "learning_rate": 4.666346615458473e-05,
      "loss": 0.2067,
      "step": 240
    },
    {
      "epoch": 0.11452130096197893,
      "grad_norm": 0.6876777410507202,
      "learning_rate": 4.642342774843975e-05,
      "loss": 0.2368,
      "step": 250
    },
    {
      "epoch": 0.11910215300045808,
      "grad_norm": 0.33562833070755005,
      "learning_rate": 4.618338934229477e-05,
      "loss": 0.2215,
      "step": 260
    },
    {
      "epoch": 0.12368300503893724,
      "grad_norm": 0.45656174421310425,
      "learning_rate": 4.5943350936149786e-05,
      "loss": 0.2051,
      "step": 270
    },
    {
      "epoch": 0.1282638570774164,
      "grad_norm": 0.5994214415550232,
      "learning_rate": 4.5703312530004805e-05,
      "loss": 0.2271,
      "step": 280
    },
    {
      "epoch": 0.13284470911589555,
      "grad_norm": 0.6960343718528748,
      "learning_rate": 4.5463274123859816e-05,
      "loss": 0.1914,
      "step": 290
    },
    {
      "epoch": 0.1374255611543747,
      "grad_norm": 0.5771898031234741,
      "learning_rate": 4.5223235717714835e-05,
      "loss": 0.2365,
      "step": 300
    },
    {
      "epoch": 0.14200641319285387,
      "grad_norm": 0.3877688944339752,
      "learning_rate": 4.498319731156985e-05,
      "loss": 0.2018,
      "step": 310
    },
    {
      "epoch": 0.14658726523133303,
      "grad_norm": 0.5205092430114746,
      "learning_rate": 4.474315890542487e-05,
      "loss": 0.2592,
      "step": 320
    },
    {
      "epoch": 0.1511681172698122,
      "grad_norm": 0.8437159061431885,
      "learning_rate": 4.450312049927989e-05,
      "loss": 0.2497,
      "step": 330
    },
    {
      "epoch": 0.15574896930829135,
      "grad_norm": 1.130053997039795,
      "learning_rate": 4.426308209313491e-05,
      "loss": 0.2258,
      "step": 340
    },
    {
      "epoch": 0.1603298213467705,
      "grad_norm": 0.4410316050052643,
      "learning_rate": 4.402304368698992e-05,
      "loss": 0.2134,
      "step": 350
    },
    {
      "epoch": 0.16491067338524965,
      "grad_norm": 0.7471569776535034,
      "learning_rate": 4.378300528084494e-05,
      "loss": 0.2121,
      "step": 360
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 0.6091960072517395,
      "learning_rate": 4.354296687469995e-05,
      "loss": 0.2367,
      "step": 370
    },
    {
      "epoch": 0.17407237746220797,
      "grad_norm": 0.37467148900032043,
      "learning_rate": 4.330292846855497e-05,
      "loss": 0.2127,
      "step": 380
    },
    {
      "epoch": 0.17865322950068713,
      "grad_norm": 0.4893397390842438,
      "learning_rate": 4.306289006240999e-05,
      "loss": 0.2018,
      "step": 390
    },
    {
      "epoch": 0.1832340815391663,
      "grad_norm": 0.41944998502731323,
      "learning_rate": 4.2822851656265006e-05,
      "loss": 0.2163,
      "step": 400
    },
    {
      "epoch": 0.18781493357764545,
      "grad_norm": 0.327027291059494,
      "learning_rate": 4.2582813250120024e-05,
      "loss": 0.2274,
      "step": 410
    },
    {
      "epoch": 0.1923957856161246,
      "grad_norm": 0.4042939245700836,
      "learning_rate": 4.2342774843975036e-05,
      "loss": 0.182,
      "step": 420
    },
    {
      "epoch": 0.19697663765460377,
      "grad_norm": 0.5198964476585388,
      "learning_rate": 4.2102736437830054e-05,
      "loss": 0.2229,
      "step": 430
    },
    {
      "epoch": 0.2015574896930829,
      "grad_norm": 0.5307021737098694,
      "learning_rate": 4.186269803168507e-05,
      "loss": 0.2123,
      "step": 440
    },
    {
      "epoch": 0.20613834173156206,
      "grad_norm": 0.3677486777305603,
      "learning_rate": 4.162265962554009e-05,
      "loss": 0.2166,
      "step": 450
    },
    {
      "epoch": 0.21071919377004122,
      "grad_norm": 0.38226303458213806,
      "learning_rate": 4.138262121939511e-05,
      "loss": 0.2389,
      "step": 460
    },
    {
      "epoch": 0.21530004580852038,
      "grad_norm": 0.4093310534954071,
      "learning_rate": 4.114258281325012e-05,
      "loss": 0.1985,
      "step": 470
    },
    {
      "epoch": 0.21988089784699955,
      "grad_norm": 0.3003683090209961,
      "learning_rate": 4.090254440710514e-05,
      "loss": 0.1982,
      "step": 480
    },
    {
      "epoch": 0.2244617498854787,
      "grad_norm": 0.48493343591690063,
      "learning_rate": 4.066250600096015e-05,
      "loss": 0.1956,
      "step": 490
    },
    {
      "epoch": 0.22904260192395787,
      "grad_norm": 0.44902732968330383,
      "learning_rate": 4.042246759481517e-05,
      "loss": 0.2145,
      "step": 500
    },
    {
      "epoch": 0.22904260192395787,
      "eval_loss": 0.21088281273841858,
      "eval_runtime": 30.1602,
      "eval_samples_per_second": 16.081,
      "eval_steps_per_second": 8.057,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 2183,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1844959166464e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
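
A minimal sketch of how this trainer_state.json could be inspected offline, using only the standard library. The file path is an assumption based on the "best_model_checkpoint" value above; the keys used ("log_history", "loss", "eval_loss", "global_step", "max_steps", "best_metric") all appear in the state shown.

import json

# Load the state file written by transformers.Trainer next to the checkpoint.
# Path assumed from "best_model_checkpoint" above; adjust to your layout.
with open("./qwen2-7b-math-coder/checkpoint-500/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']}/{state['max_steps']} "
      f"(epoch {state['epoch']:.4f})")
print(f"train loss: {train_logs[0]['loss']} -> {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"eval @ step {e['step']}: loss={e['eval_loss']:.4f}")
print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(metric {state['best_metric']:.4f})")

On this state it would report the drop in training loss from 2.0337 at step 10 to 0.2145 at step 500, and the single evaluation at step 500 with eval_loss 0.2109, which is also the best metric recorded so far.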