{
  "best_metric": 2.4954833984375,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0270947639368692,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000541895278737384,
      "grad_norm": 1.312487244606018,
      "learning_rate": 5.000000000000001e-07,
      "loss": 12.0843,
      "step": 1
    },
    {
      "epoch": 0.000541895278737384,
      "eval_loss": 2.568927049636841,
      "eval_runtime": 356.6215,
      "eval_samples_per_second": 8.715,
      "eval_steps_per_second": 2.179,
      "step": 1
    },
    {
      "epoch": 0.001083790557474768,
      "grad_norm": 2.0385873317718506,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 13.4083,
      "step": 2
    },
    {
      "epoch": 0.001625685836212152,
      "grad_norm": 1.9277825355529785,
      "learning_rate": 1.5e-06,
      "loss": 13.6711,
      "step": 3
    },
    {
      "epoch": 0.002167581114949536,
      "grad_norm": 2.5886740684509277,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 14.1684,
      "step": 4
    },
    {
      "epoch": 0.00270947639368692,
      "grad_norm": 2.3235366344451904,
      "learning_rate": 2.5e-06,
      "loss": 15.3074,
      "step": 5
    },
    {
      "epoch": 0.003251371672424304,
      "grad_norm": 2.580097198486328,
      "learning_rate": 3e-06,
      "loss": 15.5703,
      "step": 6
    },
    {
      "epoch": 0.003793266951161688,
      "grad_norm": 2.3437869548797607,
      "learning_rate": 3.5e-06,
      "loss": 15.6797,
      "step": 7
    },
    {
      "epoch": 0.004335162229899072,
      "grad_norm": 2.1751561164855957,
      "learning_rate": 4.000000000000001e-06,
      "loss": 15.6452,
      "step": 8
    },
    {
      "epoch": 0.004877057508636456,
      "grad_norm": 2.4562244415283203,
      "learning_rate": 4.5e-06,
      "loss": 16.0855,
      "step": 9
    },
    {
      "epoch": 0.00541895278737384,
      "grad_norm": 2.959887981414795,
      "learning_rate": 5e-06,
      "loss": 16.5416,
      "step": 10
    },
    {
      "epoch": 0.005960848066111224,
      "grad_norm": 2.9056694507598877,
      "learning_rate": 4.99847706754774e-06,
      "loss": 15.9268,
      "step": 11
    },
    {
      "epoch": 0.006502743344848608,
      "grad_norm": 2.3838143348693848,
      "learning_rate": 4.993910125649561e-06,
      "loss": 17.0944,
      "step": 12
    },
    {
      "epoch": 0.007044638623585992,
      "grad_norm": 3.266652822494507,
      "learning_rate": 4.986304738420684e-06,
      "loss": 17.1496,
      "step": 13
    },
    {
      "epoch": 0.007586533902323376,
      "grad_norm": 3.1640474796295166,
      "learning_rate": 4.975670171853926e-06,
      "loss": 17.4364,
      "step": 14
    },
    {
      "epoch": 0.00812842918106076,
      "grad_norm": 3.757476329803467,
      "learning_rate": 4.962019382530521e-06,
      "loss": 17.5002,
      "step": 15
    },
    {
      "epoch": 0.008670324459798144,
      "grad_norm": 3.355529546737671,
      "learning_rate": 4.9453690018345144e-06,
      "loss": 16.9041,
      "step": 16
    },
    {
      "epoch": 0.009212219738535529,
      "grad_norm": 3.3027448654174805,
      "learning_rate": 4.925739315689991e-06,
      "loss": 18.0518,
      "step": 17
    },
    {
      "epoch": 0.009754115017272911,
      "grad_norm": 3.941869020462036,
      "learning_rate": 4.903154239845798e-06,
      "loss": 17.2925,
      "step": 18
    },
    {
      "epoch": 0.010296010296010296,
      "grad_norm": 3.906616687774658,
      "learning_rate": 4.8776412907378845e-06,
      "loss": 18.5134,
      "step": 19
    },
    {
      "epoch": 0.01083790557474768,
      "grad_norm": 4.283662796020508,
      "learning_rate": 4.849231551964771e-06,
      "loss": 17.6599,
      "step": 20
    },
    {
      "epoch": 0.011379800853485065,
      "grad_norm": 3.5966744422912598,
      "learning_rate": 4.817959636416969e-06,
      "loss": 17.765,
      "step": 21
    },
    {
      "epoch": 0.011921696132222447,
      "grad_norm": 3.567039966583252,
      "learning_rate": 4.783863644106502e-06,
      "loss": 20.5735,
      "step": 22
    },
    {
      "epoch": 0.012463591410959832,
      "grad_norm": 4.263599395751953,
      "learning_rate": 4.746985115747918e-06,
      "loss": 18.9028,
      "step": 23
    },
    {
      "epoch": 0.013005486689697216,
      "grad_norm": 3.6329641342163086,
      "learning_rate": 4.707368982147318e-06,
      "loss": 18.6384,
      "step": 24
    },
    {
      "epoch": 0.0135473819684346,
      "grad_norm": 5.154904365539551,
      "learning_rate": 4.665063509461098e-06,
      "loss": 19.6976,
      "step": 25
    },
    {
      "epoch": 0.014089277247171983,
      "grad_norm": 4.175611972808838,
      "learning_rate": 4.620120240391065e-06,
      "loss": 18.8855,
      "step": 26
    },
    {
      "epoch": 0.014631172525909368,
      "grad_norm": 5.684014797210693,
      "learning_rate": 4.572593931387604e-06,
      "loss": 17.9151,
      "step": 27
    },
    {
      "epoch": 0.015173067804646752,
      "grad_norm": 3.9782209396362305,
      "learning_rate": 4.522542485937369e-06,
      "loss": 18.1362,
      "step": 28
    },
    {
      "epoch": 0.015714963083384135,
      "grad_norm": 5.732182025909424,
      "learning_rate": 4.470026884016805e-06,
      "loss": 20.7214,
      "step": 29
    },
    {
      "epoch": 0.01625685836212152,
      "grad_norm": 5.507081031799316,
      "learning_rate": 4.415111107797445e-06,
      "loss": 19.7423,
      "step": 30
    },
    {
      "epoch": 0.016798753640858904,
      "grad_norm": 5.731754779815674,
      "learning_rate": 4.357862063693486e-06,
      "loss": 22.4296,
      "step": 31
    },
    {
      "epoch": 0.017340648919596288,
      "grad_norm": 4.9393463134765625,
      "learning_rate": 4.2983495008466285e-06,
      "loss": 19.3488,
      "step": 32
    },
    {
      "epoch": 0.017882544198333673,
      "grad_norm": 4.795718669891357,
      "learning_rate": 4.236645926147493e-06,
      "loss": 19.5817,
      "step": 33
    },
    {
      "epoch": 0.018424439477071057,
      "grad_norm": 5.4530463218688965,
      "learning_rate": 4.172826515897146e-06,
      "loss": 20.7245,
      "step": 34
    },
    {
      "epoch": 0.01896633475580844,
      "grad_norm": 5.843173980712891,
      "learning_rate": 4.106969024216348e-06,
      "loss": 22.3009,
      "step": 35
    },
    {
      "epoch": 0.019508230034545822,
      "grad_norm": 6.191988945007324,
      "learning_rate": 4.039153688314146e-06,
      "loss": 19.8796,
      "step": 36
    },
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 6.448052883148193,
      "learning_rate": 3.969463130731183e-06,
      "loss": 21.6479,
      "step": 37
    },
    {
      "epoch": 0.02059202059202059,
      "grad_norm": 11.463981628417969,
      "learning_rate": 3.897982258676867e-06,
      "loss": 21.8738,
      "step": 38
    },
    {
      "epoch": 0.021133915870757976,
      "grad_norm": 6.69301700592041,
      "learning_rate": 3.824798160583012e-06,
      "loss": 21.8778,
      "step": 39
    },
    {
      "epoch": 0.02167581114949536,
      "grad_norm": 6.770164489746094,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 22.1602,
      "step": 40
    },
    {
      "epoch": 0.022217706428232745,
      "grad_norm": 7.03548002243042,
      "learning_rate": 3.6736789069647273e-06,
      "loss": 22.7901,
      "step": 41
    },
    {
      "epoch": 0.02275960170697013,
      "grad_norm": 9.729033470153809,
      "learning_rate": 3.595927866972694e-06,
      "loss": 24.2832,
      "step": 42
    },
    {
      "epoch": 0.023301496985707514,
      "grad_norm": 7.9464335441589355,
      "learning_rate": 3.516841607689501e-06,
      "loss": 25.8137,
      "step": 43
    },
    {
      "epoch": 0.023843392264444895,
      "grad_norm": 8.11252212524414,
      "learning_rate": 3.436516483539781e-06,
      "loss": 28.7299,
      "step": 44
    },
    {
      "epoch": 0.02438528754318228,
      "grad_norm": 9.726422309875488,
      "learning_rate": 3.3550503583141726e-06,
      "loss": 26.6308,
      "step": 45
    },
    {
      "epoch": 0.024927182821919663,
      "grad_norm": 11.935210227966309,
      "learning_rate": 3.272542485937369e-06,
      "loss": 30.5609,
      "step": 46
    },
    {
      "epoch": 0.025469078100657048,
      "grad_norm": 10.29076099395752,
      "learning_rate": 3.189093389542498e-06,
      "loss": 28.0565,
      "step": 47
    },
    {
      "epoch": 0.026010973379394432,
      "grad_norm": 13.732169151306152,
      "learning_rate": 3.1048047389991693e-06,
      "loss": 33.951,
      "step": 48
    },
    {
      "epoch": 0.026552868658131817,
      "grad_norm": 12.743755340576172,
      "learning_rate": 3.019779227044398e-06,
      "loss": 35.0371,
      "step": 49
    },
    {
      "epoch": 0.0270947639368692,
      "grad_norm": 19.506738662719727,
      "learning_rate": 2.9341204441673267e-06,
      "loss": 42.8953,
      "step": 50
    },
    {
      "epoch": 0.0270947639368692,
      "eval_loss": 2.4954833984375,
      "eval_runtime": 358.0494,
      "eval_samples_per_second": 8.68,
      "eval_steps_per_second": 2.17,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2727399074496512e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}