{
  "best_metric": 11.760108947753906,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.04805959389643157,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006407945852857543,
      "grad_norm": 0.018796086311340332,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 11.7657,
      "step": 1
    },
    {
      "epoch": 0.0006407945852857543,
      "eval_loss": 11.764625549316406,
      "eval_runtime": 0.2382,
      "eval_samples_per_second": 209.889,
      "eval_steps_per_second": 54.571,
      "step": 1
    },
    {
      "epoch": 0.0012815891705715086,
      "grad_norm": 0.01367566641420126,
      "learning_rate": 6.666666666666667e-05,
      "loss": 11.7661,
      "step": 2
    },
    {
      "epoch": 0.001922383755857263,
      "grad_norm": 0.014899961650371552,
      "learning_rate": 0.0001,
      "loss": 11.7662,
      "step": 3
    },
    {
      "epoch": 0.002563178341143017,
      "grad_norm": 0.015141118317842484,
      "learning_rate": 9.99571699711836e-05,
      "loss": 11.7662,
      "step": 4
    },
    {
      "epoch": 0.0032039729264287716,
      "grad_norm": 0.014948413707315922,
      "learning_rate": 9.982876141412856e-05,
      "loss": 11.7664,
      "step": 5
    },
    {
      "epoch": 0.003844767511714526,
      "grad_norm": 0.01567680388689041,
      "learning_rate": 9.961501876182148e-05,
      "loss": 11.7656,
      "step": 6
    },
    {
      "epoch": 0.00448556209700028,
      "grad_norm": 0.016142114996910095,
      "learning_rate": 9.931634888554937e-05,
      "loss": 11.7657,
      "step": 7
    },
    {
      "epoch": 0.005126356682286034,
      "grad_norm": 0.014875160530209541,
      "learning_rate": 9.893332032039701e-05,
      "loss": 11.7652,
      "step": 8
    },
    {
      "epoch": 0.005767151267571789,
      "grad_norm": 0.015255581587553024,
      "learning_rate": 9.846666218300807e-05,
      "loss": 11.7652,
      "step": 9
    },
    {
      "epoch": 0.006407945852857543,
      "grad_norm": 0.014767582528293133,
      "learning_rate": 9.791726278367022e-05,
      "loss": 11.7645,
      "step": 10
    },
    {
      "epoch": 0.007048740438143298,
      "grad_norm": 0.01818861812353134,
      "learning_rate": 9.728616793536588e-05,
      "loss": 11.7651,
      "step": 11
    },
    {
      "epoch": 0.007689535023429052,
      "grad_norm": 0.017870761454105377,
      "learning_rate": 9.657457896300791e-05,
      "loss": 11.7646,
      "step": 12
    },
    {
      "epoch": 0.008330329608714807,
      "grad_norm": 0.01960948295891285,
      "learning_rate": 9.578385041664925e-05,
      "loss": 11.7654,
      "step": 13
    },
    {
      "epoch": 0.00897112419400056,
      "grad_norm": 0.0150187062099576,
      "learning_rate": 9.491548749301997e-05,
      "loss": 11.7652,
      "step": 14
    },
    {
      "epoch": 0.009611918779286315,
      "grad_norm": 0.015270842239260674,
      "learning_rate": 9.397114317029975e-05,
      "loss": 11.7649,
      "step": 15
    },
    {
      "epoch": 0.010252713364572069,
      "grad_norm": 0.018579093739390373,
      "learning_rate": 9.295261506157986e-05,
      "loss": 11.7654,
      "step": 16
    },
    {
      "epoch": 0.010893507949857825,
      "grad_norm": 0.018623633310198784,
      "learning_rate": 9.186184199300464e-05,
      "loss": 11.7655,
      "step": 17
    },
    {
      "epoch": 0.011534302535143579,
      "grad_norm": 0.021639393642544746,
      "learning_rate": 9.070090031310558e-05,
      "loss": 11.764,
      "step": 18
    },
    {
      "epoch": 0.012175097120429332,
      "grad_norm": 0.023298516869544983,
      "learning_rate": 8.947199994035401e-05,
      "loss": 11.7645,
      "step": 19
    },
    {
      "epoch": 0.012815891705715086,
      "grad_norm": 0.023485591635107994,
      "learning_rate": 8.817748015645558e-05,
      "loss": 11.7654,
      "step": 20
    },
    {
      "epoch": 0.01345668629100084,
      "grad_norm": 0.021341130137443542,
      "learning_rate": 8.681980515339464e-05,
      "loss": 11.7633,
      "step": 21
    },
    {
      "epoch": 0.014097480876286596,
      "grad_norm": 0.01896008849143982,
      "learning_rate": 8.540155934270471e-05,
      "loss": 11.7641,
      "step": 22
    },
    {
      "epoch": 0.01473827546157235,
      "grad_norm": 0.02342885173857212,
      "learning_rate": 8.392544243589427e-05,
      "loss": 11.7632,
      "step": 23
    },
    {
      "epoch": 0.015379070046858104,
      "grad_norm": 0.02347911335527897,
      "learning_rate": 8.239426430539243e-05,
      "loss": 11.7628,
      "step": 24
    },
    {
      "epoch": 0.01601986463214386,
      "grad_norm": 0.025197699666023254,
      "learning_rate": 8.081093963579707e-05,
      "loss": 11.7631,
      "step": 25
    },
    {
      "epoch": 0.01601986463214386,
      "eval_loss": 11.763106346130371,
      "eval_runtime": 0.2386,
      "eval_samples_per_second": 209.58,
      "eval_steps_per_second": 54.491,
      "step": 25
    },
    {
      "epoch": 0.016660659217429614,
      "grad_norm": 0.02223326452076435,
      "learning_rate": 7.917848237560709e-05,
      "loss": 11.7637,
      "step": 26
    },
    {
      "epoch": 0.017301453802715368,
      "grad_norm": 0.020472681149840355,
      "learning_rate": 7.75e-05,
      "loss": 11.7644,
      "step": 27
    },
    {
      "epoch": 0.01794224838800112,
      "grad_norm": 0.023782216012477875,
      "learning_rate": 7.577868759557654e-05,
      "loss": 11.7642,
      "step": 28
    },
    {
      "epoch": 0.018583042973286876,
      "grad_norm": 0.0240348968654871,
      "learning_rate": 7.401782177833148e-05,
      "loss": 11.765,
      "step": 29
    },
    {
      "epoch": 0.01922383755857263,
      "grad_norm": 0.028316188603639603,
      "learning_rate": 7.222075445642904e-05,
      "loss": 11.7643,
      "step": 30
    },
    {
      "epoch": 0.019864632143858384,
      "grad_norm": 0.02909417264163494,
      "learning_rate": 7.03909064496551e-05,
      "loss": 11.763,
      "step": 31
    },
    {
      "epoch": 0.020505426729144138,
      "grad_norm": 0.029121633619070053,
      "learning_rate": 6.853176097769229e-05,
      "loss": 11.763,
      "step": 32
    },
    {
      "epoch": 0.02114622131442989,
      "grad_norm": 0.0339232012629509,
      "learning_rate": 6.664685702961344e-05,
      "loss": 11.7627,
      "step": 33
    },
    {
      "epoch": 0.02178701589971565,
      "grad_norm": 0.03640107810497284,
      "learning_rate": 6.473978262721463e-05,
      "loss": 11.7633,
      "step": 34
    },
    {
      "epoch": 0.022427810485001403,
      "grad_norm": 0.03253023698925972,
      "learning_rate": 6.281416799501188e-05,
      "loss": 11.7621,
      "step": 35
    },
    {
      "epoch": 0.023068605070287157,
      "grad_norm": 0.028020696714520454,
      "learning_rate": 6.087367864990233e-05,
      "loss": 11.7629,
      "step": 36
    },
    {
      "epoch": 0.02370939965557291,
      "grad_norm": 0.031051475554704666,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 11.7626,
      "step": 37
    },
    {
      "epoch": 0.024350194240858665,
      "grad_norm": 0.04053696244955063,
      "learning_rate": 5.696287243144013e-05,
      "loss": 11.7636,
      "step": 38
    },
    {
      "epoch": 0.02499098882614442,
      "grad_norm": 0.029524900019168854,
      "learning_rate": 5.500000000000001e-05,
      "loss": 11.7634,
      "step": 39
    },
    {
      "epoch": 0.025631783411430173,
      "grad_norm": 0.03623311594128609,
      "learning_rate": 5.303712756855988e-05,
      "loss": 11.7624,
      "step": 40
    },
    {
      "epoch": 0.026272577996715927,
      "grad_norm": 0.03804594278335571,
      "learning_rate": 5.107799157635538e-05,
      "loss": 11.763,
      "step": 41
    },
    {
      "epoch": 0.02691337258200168,
      "grad_norm": 0.03952088952064514,
      "learning_rate": 4.912632135009769e-05,
      "loss": 11.7628,
      "step": 42
    },
    {
      "epoch": 0.027554167167287435,
      "grad_norm": 0.037932537496089935,
      "learning_rate": 4.718583200498814e-05,
      "loss": 11.763,
      "step": 43
    },
    {
      "epoch": 0.028194961752573192,
      "grad_norm": 0.04360462725162506,
      "learning_rate": 4.526021737278538e-05,
      "loss": 11.7623,
      "step": 44
    },
    {
      "epoch": 0.028835756337858946,
      "grad_norm": 0.04510717839002609,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 11.762,
      "step": 45
    },
    {
      "epoch": 0.0294765509231447,
      "grad_norm": 0.03812625631690025,
      "learning_rate": 4.146823902230772e-05,
      "loss": 11.762,
      "step": 46
    },
    {
      "epoch": 0.030117345508430454,
      "grad_norm": 0.04157785326242447,
      "learning_rate": 3.960909355034491e-05,
      "loss": 11.7614,
      "step": 47
    },
    {
      "epoch": 0.030758140093716208,
      "grad_norm": 0.044761281460523605,
      "learning_rate": 3.777924554357096e-05,
      "loss": 11.762,
      "step": 48
    },
    {
      "epoch": 0.03139893467900196,
      "grad_norm": 0.04416681081056595,
      "learning_rate": 3.598217822166854e-05,
      "loss": 11.7618,
      "step": 49
    },
    {
      "epoch": 0.03203972926428772,
      "grad_norm": 0.0453573577105999,
      "learning_rate": 3.422131240442349e-05,
      "loss": 11.7605,
      "step": 50
    },
    {
      "epoch": 0.03203972926428772,
      "eval_loss": 11.761113166809082,
      "eval_runtime": 0.2345,
      "eval_samples_per_second": 213.264,
      "eval_steps_per_second": 55.449,
      "step": 50
    },
    {
      "epoch": 0.03268052384957347,
      "grad_norm": 0.03685550019145012,
      "learning_rate": 3.250000000000001e-05,
      "loss": 11.7635,
      "step": 51
    },
    {
      "epoch": 0.03332131843485923,
      "grad_norm": 0.04207245260477066,
      "learning_rate": 3.082151762439293e-05,
      "loss": 11.7628,
      "step": 52
    },
    {
      "epoch": 0.03396211302014498,
      "grad_norm": 0.047665830701589584,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 11.7617,
      "step": 53
    },
    {
      "epoch": 0.034602907605430736,
      "grad_norm": 0.050436247140169144,
      "learning_rate": 2.760573569460757e-05,
      "loss": 11.7613,
      "step": 54
    },
    {
      "epoch": 0.035243702190716486,
      "grad_norm": 0.04612034186720848,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 11.7613,
      "step": 55
    },
    {
      "epoch": 0.03588449677600224,
      "grad_norm": 0.04736921191215515,
      "learning_rate": 2.459844065729529e-05,
      "loss": 11.7612,
      "step": 56
    },
    {
      "epoch": 0.036525291361287994,
      "grad_norm": 0.05022180452942848,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 11.7604,
      "step": 57
    },
    {
      "epoch": 0.03716608594657375,
      "grad_norm": 0.04974595829844475,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 11.76,
      "step": 58
    },
    {
      "epoch": 0.03780688053185951,
      "grad_norm": 0.047848932445049286,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 11.7615,
      "step": 59
    },
    {
      "epoch": 0.03844767511714526,
      "grad_norm": 0.04960588365793228,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 11.7612,
      "step": 60
    },
    {
      "epoch": 0.03908846970243102,
      "grad_norm": 0.05584871768951416,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 11.7601,
      "step": 61
    },
    {
      "epoch": 0.03972926428771677,
      "grad_norm": 0.04442853853106499,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 11.7609,
      "step": 62
    },
    {
      "epoch": 0.040370058873002525,
      "grad_norm": 0.04721665382385254,
      "learning_rate": 1.602885682970026e-05,
      "loss": 11.7637,
      "step": 63
    },
    {
      "epoch": 0.041010853458288275,
      "grad_norm": 0.04801863431930542,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 11.7612,
      "step": 64
    },
    {
      "epoch": 0.04165164804357403,
      "grad_norm": 0.04716704413294792,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 11.7627,
      "step": 65
    },
    {
      "epoch": 0.04229244262885978,
      "grad_norm": 0.05011888965964317,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 11.761,
      "step": 66
    },
    {
      "epoch": 0.04293323721414554,
      "grad_norm": 0.04726472124457359,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 11.7608,
      "step": 67
    },
    {
      "epoch": 0.0435740317994313,
      "grad_norm": 0.053742505609989166,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 11.76,
      "step": 68
    },
    {
      "epoch": 0.04421482638471705,
      "grad_norm": 0.05879789963364601,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 11.7597,
      "step": 69
    },
    {
      "epoch": 0.044855620970002806,
      "grad_norm": 0.05244609713554382,
      "learning_rate": 1.1066679679603e-05,
      "loss": 11.7611,
      "step": 70
    },
    {
      "epoch": 0.04549641555528856,
      "grad_norm": 0.05740131437778473,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 11.7612,
      "step": 71
    },
    {
      "epoch": 0.046137210140574314,
      "grad_norm": 0.04920673370361328,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 11.7614,
      "step": 72
    },
    {
      "epoch": 0.046778004725860065,
      "grad_norm": 0.05816422030329704,
      "learning_rate": 1.017123858587145e-05,
      "loss": 11.7607,
      "step": 73
    },
    {
      "epoch": 0.04741879931114582,
      "grad_norm": 0.05213126912713051,
      "learning_rate": 1.00428300288164e-05,
      "loss": 11.7598,
      "step": 74
    },
    {
      "epoch": 0.04805959389643157,
      "grad_norm": 0.061734773218631744,
      "learning_rate": 1e-05,
      "loss": 11.7597,
      "step": 75
    },
    {
      "epoch": 0.04805959389643157,
      "eval_loss": 11.760108947753906,
      "eval_runtime": 0.2399,
      "eval_samples_per_second": 208.462,
      "eval_steps_per_second": 54.2,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 246955923210240.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}