{
  "best_metric": 0.06989976763725281,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.3410214168039538,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.026359143327841845,
      "grad_norm": 3.2261435985565186,
      "learning_rate": 5e-05,
      "loss": 2.8904,
      "step": 1
    },
    {
      "epoch": 0.026359143327841845,
      "eval_loss": 4.496891498565674,
      "eval_runtime": 2.6493,
      "eval_samples_per_second": 18.873,
      "eval_steps_per_second": 4.907,
      "step": 1
    },
    {
      "epoch": 0.05271828665568369,
      "grad_norm": 3.7886242866516113,
      "learning_rate": 0.0001,
      "loss": 3.2601,
      "step": 2
    },
    {
      "epoch": 0.07907742998352553,
      "grad_norm": 4.198616027832031,
      "learning_rate": 9.990365154573717e-05,
      "loss": 3.3531,
      "step": 3
    },
    {
      "epoch": 0.10543657331136738,
      "grad_norm": 4.185900688171387,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.7364,
      "step": 4
    },
    {
      "epoch": 0.13179571663920922,
      "grad_norm": 3.605696439743042,
      "learning_rate": 9.913533761814537e-05,
      "loss": 2.3579,
      "step": 5
    },
    {
      "epoch": 0.15815485996705106,
      "grad_norm": 3.8062736988067627,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.147,
      "step": 6
    },
    {
      "epoch": 0.18451400329489293,
      "grad_norm": 4.500556945800781,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.0153,
      "step": 7
    },
    {
      "epoch": 0.21087314662273476,
      "grad_norm": 4.745126724243164,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.7782,
      "step": 8
    },
    {
      "epoch": 0.2372322899505766,
      "grad_norm": 5.740296363830566,
      "learning_rate": 9.535927336897098e-05,
      "loss": 1.9259,
      "step": 9
    },
    {
      "epoch": 0.26359143327841844,
      "grad_norm": 7.462158203125,
      "learning_rate": 9.397114317029975e-05,
      "loss": 1.8581,
      "step": 10
    },
    {
      "epoch": 0.2899505766062603,
      "grad_norm": 4.397566318511963,
      "learning_rate": 9.241613255361455e-05,
      "loss": 1.0152,
      "step": 11
    },
    {
      "epoch": 0.3163097199341021,
      "grad_norm": 2.9855945110321045,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.7193,
      "step": 12
    },
    {
      "epoch": 0.342668863261944,
      "grad_norm": 2.0212604999542236,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.6057,
      "step": 13
    },
    {
      "epoch": 0.36902800658978585,
      "grad_norm": 1.7968220710754395,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.538,
      "step": 14
    },
    {
      "epoch": 0.3953871499176277,
      "grad_norm": 1.760416865348816,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.7498,
      "step": 15
    },
    {
      "epoch": 0.42174629324546953,
      "grad_norm": 1.9944193363189697,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.7176,
      "step": 16
    },
    {
      "epoch": 0.44810543657331137,
      "grad_norm": 1.9517091512680054,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.7564,
      "step": 17
    },
    {
      "epoch": 0.4744645799011532,
      "grad_norm": 2.649627685546875,
      "learning_rate": 7.75e-05,
      "loss": 0.7469,
      "step": 18
    },
    {
      "epoch": 0.500823723228995,
      "grad_norm": 3.311579704284668,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.7729,
      "step": 19
    },
    {
      "epoch": 0.5271828665568369,
      "grad_norm": 1.6942845582962036,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.3132,
      "step": 20
    },
    {
      "epoch": 0.5535420098846787,
      "grad_norm": 1.6794170141220093,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.2377,
      "step": 21
    },
    {
      "epoch": 0.5799011532125206,
      "grad_norm": 1.4418450593948364,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.2117,
      "step": 22
    },
    {
      "epoch": 0.6062602965403624,
      "grad_norm": 1.11506986618042,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.2163,
      "step": 23
    },
    {
      "epoch": 0.6326194398682042,
      "grad_norm": 1.2152330875396729,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.211,
      "step": 24
    },
    {
      "epoch": 0.6589785831960461,
      "grad_norm": 1.5132273435592651,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.2629,
      "step": 25
    },
    {
      "epoch": 0.6589785831960461,
      "eval_loss": 0.22433076798915863,
      "eval_runtime": 2.0455,
      "eval_samples_per_second": 24.444,
      "eval_steps_per_second": 6.355,
      "step": 25
    },
    {
      "epoch": 0.685337726523888,
      "grad_norm": 1.986502766609192,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.2799,
      "step": 26
    },
    {
      "epoch": 0.7116968698517299,
      "grad_norm": 1.9361213445663452,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.3052,
      "step": 27
    },
    {
      "epoch": 0.7380560131795717,
      "grad_norm": 3.370849370956421,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.4228,
      "step": 28
    },
    {
      "epoch": 0.7644151565074135,
      "grad_norm": 3.750485420227051,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.2901,
      "step": 29
    },
    {
      "epoch": 0.7907742998352554,
      "grad_norm": 2.9256770610809326,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.1042,
      "step": 30
    },
    {
      "epoch": 0.8171334431630972,
      "grad_norm": 2.44817852973938,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.1124,
      "step": 31
    },
    {
      "epoch": 0.8434925864909391,
      "grad_norm": 1.4348925352096558,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.1066,
      "step": 32
    },
    {
      "epoch": 0.8698517298187809,
      "grad_norm": 1.0853517055511475,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.0853,
      "step": 33
    },
    {
      "epoch": 0.8962108731466227,
      "grad_norm": 2.7698771953582764,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.127,
      "step": 34
    },
    {
      "epoch": 0.9225700164744646,
      "grad_norm": 2.3615007400512695,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.2138,
      "step": 35
    },
    {
      "epoch": 0.9489291598023064,
      "grad_norm": 2.252959966659546,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.2065,
      "step": 36
    },
    {
      "epoch": 0.9752883031301482,
      "grad_norm": 4.12150239944458,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.2188,
      "step": 37
    },
    {
      "epoch": 1.0247116968698518,
      "grad_norm": 5.144357681274414,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.3064,
      "step": 38
    },
    {
      "epoch": 1.0510708401976936,
      "grad_norm": 0.48973578214645386,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.0159,
      "step": 39
    },
    {
      "epoch": 1.0774299835255354,
      "grad_norm": 0.3477337658405304,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.0171,
      "step": 40
    },
    {
      "epoch": 1.1037891268533773,
      "grad_norm": 0.7385883927345276,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.0308,
      "step": 41
    },
    {
      "epoch": 1.130148270181219,
      "grad_norm": 0.7831564545631409,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.0379,
      "step": 42
    },
    {
      "epoch": 1.156507413509061,
      "grad_norm": 0.731144368648529,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.0718,
      "step": 43
    },
    {
      "epoch": 1.1828665568369028,
      "grad_norm": 1.3186781406402588,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.0928,
      "step": 44
    },
    {
      "epoch": 1.2092257001647446,
      "grad_norm": 1.7480039596557617,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.1888,
      "step": 45
    },
    {
      "epoch": 1.2355848434925865,
      "grad_norm": 1.843854546546936,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.1448,
      "step": 46
    },
    {
      "epoch": 1.2619439868204283,
      "grad_norm": 0.9175070524215698,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.0842,
      "step": 47
    },
    {
      "epoch": 1.2883031301482701,
      "grad_norm": 0.22026900947093964,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.0107,
      "step": 48
    },
    {
      "epoch": 1.314662273476112,
      "grad_norm": 0.20567184686660767,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.0084,
      "step": 49
    },
    {
      "epoch": 1.3410214168039538,
      "grad_norm": 0.2838402986526489,
      "learning_rate": 1e-05,
      "loss": 0.0205,
      "step": 50
    },
    {
      "epoch": 1.3410214168039538,
      "eval_loss": 0.06989976763725281,
      "eval_runtime": 2.0408,
      "eval_samples_per_second": 24.501,
      "eval_steps_per_second": 6.37,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}