|
{
  "best_metric": 7.95767068862915,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.4625614339404452,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009251228678808905,
      "grad_norm": 4.89844274520874,
      "learning_rate": 5e-05,
      "loss": 9.8817,
      "step": 1
    },
    {
      "epoch": 0.009251228678808905,
      "eval_loss": 9.016510963439941,
      "eval_runtime": 0.7476,
      "eval_samples_per_second": 66.879,
      "eval_steps_per_second": 17.389,
      "step": 1
    },
    {
      "epoch": 0.01850245735761781,
      "grad_norm": 3.1463985443115234,
      "learning_rate": 0.0001,
      "loss": 9.2431,
      "step": 2
    },
    {
      "epoch": 0.027753686036426712,
      "grad_norm": 2.822237014770508,
      "learning_rate": 9.990365154573717e-05,
      "loss": 9.0718,
      "step": 3
    },
    {
      "epoch": 0.03700491471523562,
      "grad_norm": 2.731067180633545,
      "learning_rate": 9.961501876182148e-05,
      "loss": 8.8489,
      "step": 4
    },
    {
      "epoch": 0.04625614339404452,
      "grad_norm": 2.654003143310547,
      "learning_rate": 9.913533761814537e-05,
      "loss": 8.8476,
      "step": 5
    },
    {
      "epoch": 0.055507372072853424,
      "grad_norm": 2.670952558517456,
      "learning_rate": 9.846666218300807e-05,
      "loss": 8.6677,
      "step": 6
    },
    {
      "epoch": 0.06475860075166233,
      "grad_norm": 2.6398637294769287,
      "learning_rate": 9.761185582727977e-05,
      "loss": 8.6832,
      "step": 7
    },
    {
      "epoch": 0.07400982943047124,
      "grad_norm": 2.579364538192749,
      "learning_rate": 9.657457896300791e-05,
      "loss": 8.5877,
      "step": 8
    },
    {
      "epoch": 0.08326105810928014,
      "grad_norm": 2.7029902935028076,
      "learning_rate": 9.535927336897098e-05,
      "loss": 8.5979,
      "step": 9
    },
    {
      "epoch": 0.09251228678808904,
      "grad_norm": 2.9253904819488525,
      "learning_rate": 9.397114317029975e-05,
      "loss": 8.5614,
      "step": 10
    },
    {
      "epoch": 0.10176351546689795,
      "grad_norm": 3.1259610652923584,
      "learning_rate": 9.241613255361455e-05,
      "loss": 8.7359,
      "step": 11
    },
    {
      "epoch": 0.11101474414570685,
      "grad_norm": 4.356231689453125,
      "learning_rate": 9.070090031310558e-05,
      "loss": 8.9214,
      "step": 12
    },
    {
      "epoch": 0.12026597282451576,
      "grad_norm": 5.031243801116943,
      "learning_rate": 8.883279133655399e-05,
      "loss": 9.2625,
      "step": 13
    },
    {
      "epoch": 0.12951720150332466,
      "grad_norm": 2.5803394317626953,
      "learning_rate": 8.681980515339464e-05,
      "loss": 8.6955,
      "step": 14
    },
    {
      "epoch": 0.13876843018213356,
      "grad_norm": 1.9104470014572144,
      "learning_rate": 8.467056167950311e-05,
      "loss": 8.4649,
      "step": 15
    },
    {
      "epoch": 0.14801965886094248,
      "grad_norm": 1.7666088342666626,
      "learning_rate": 8.239426430539243e-05,
      "loss": 8.4588,
      "step": 16
    },
    {
      "epoch": 0.15727088753975138,
      "grad_norm": 1.6460059881210327,
      "learning_rate": 8.000066048588211e-05,
      "loss": 8.3894,
      "step": 17
    },
    {
      "epoch": 0.16652211621856028,
      "grad_norm": 1.698133945465088,
      "learning_rate": 7.75e-05,
      "loss": 8.3618,
      "step": 18
    },
    {
      "epoch": 0.17577334489736918,
      "grad_norm": 1.5541409254074097,
      "learning_rate": 7.490299105985507e-05,
      "loss": 8.2921,
      "step": 19
    },
    {
      "epoch": 0.18502457357617808,
      "grad_norm": 1.6291533708572388,
      "learning_rate": 7.222075445642904e-05,
      "loss": 8.1995,
      "step": 20
    },
    {
      "epoch": 0.194275802254987,
      "grad_norm": 1.7791177034378052,
      "learning_rate": 6.946477593864228e-05,
      "loss": 8.1354,
      "step": 21
    },
    {
      "epoch": 0.2035270309337959,
      "grad_norm": 1.7086344957351685,
      "learning_rate": 6.664685702961344e-05,
      "loss": 8.1375,
      "step": 22
    },
    {
      "epoch": 0.2127782596126048,
      "grad_norm": 1.8993000984191895,
      "learning_rate": 6.377906449072578e-05,
      "loss": 8.141,
      "step": 23
    },
    {
      "epoch": 0.2220294882914137,
      "grad_norm": 2.3244051933288574,
      "learning_rate": 6.087367864990233e-05,
      "loss": 8.312,
      "step": 24
    },
    {
      "epoch": 0.2312807169702226,
      "grad_norm": 4.274384021759033,
      "learning_rate": 5.794314081535644e-05,
      "loss": 8.7624,
      "step": 25
    },
    {
      "epoch": 0.2312807169702226,
      "eval_loss": 8.139893531799316,
      "eval_runtime": 0.7595,
      "eval_samples_per_second": 65.836,
      "eval_steps_per_second": 17.117,
      "step": 25
    },
    {
      "epoch": 0.24053194564903152,
      "grad_norm": 4.174472332000732,
      "learning_rate": 5.500000000000001e-05,
      "loss": 8.7814,
      "step": 26
    },
    {
      "epoch": 0.24978317432784042,
      "grad_norm": 1.737717628479004,
      "learning_rate": 5.205685918464356e-05,
      "loss": 8.2499,
      "step": 27
    },
    {
      "epoch": 0.2590344030066493,
      "grad_norm": 1.5726858377456665,
      "learning_rate": 4.912632135009769e-05,
      "loss": 8.1994,
      "step": 28
    },
    {
      "epoch": 0.2682856316854582,
      "grad_norm": 1.4943739175796509,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 8.143,
      "step": 29
    },
    {
      "epoch": 0.2775368603642671,
      "grad_norm": 1.4114775657653809,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 8.1014,
      "step": 30
    },
    {
      "epoch": 0.286788089043076,
      "grad_norm": 1.3771249055862427,
      "learning_rate": 4.053522406135775e-05,
      "loss": 8.0529,
      "step": 31
    },
    {
      "epoch": 0.29603931772188496,
      "grad_norm": 1.3822271823883057,
      "learning_rate": 3.777924554357096e-05,
      "loss": 7.9885,
      "step": 32
    },
    {
      "epoch": 0.30529054640069386,
      "grad_norm": 1.4392659664154053,
      "learning_rate": 3.509700894014496e-05,
      "loss": 7.9505,
      "step": 33
    },
    {
      "epoch": 0.31454177507950276,
      "grad_norm": 1.3860464096069336,
      "learning_rate": 3.250000000000001e-05,
      "loss": 8.0864,
      "step": 34
    },
    {
      "epoch": 0.32379300375831166,
      "grad_norm": 1.371462345123291,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 8.0629,
      "step": 35
    },
    {
      "epoch": 0.33304423243712056,
      "grad_norm": 1.6449263095855713,
      "learning_rate": 2.760573569460757e-05,
      "loss": 8.1709,
      "step": 36
    },
    {
      "epoch": 0.34229546111592946,
      "grad_norm": 2.209439277648926,
      "learning_rate": 2.53294383204969e-05,
      "loss": 8.3228,
      "step": 37
    },
    {
      "epoch": 0.35154668979473835,
      "grad_norm": 5.064708232879639,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 8.7106,
      "step": 38
    },
    {
      "epoch": 0.36079791847354725,
      "grad_norm": 2.071536064147949,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 8.3119,
      "step": 39
    },
    {
      "epoch": 0.37004914715235615,
      "grad_norm": 1.3740651607513428,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 8.0236,
      "step": 40
    },
    {
      "epoch": 0.3793003758311651,
      "grad_norm": 1.3041908740997314,
      "learning_rate": 1.758386744638546e-05,
      "loss": 8.1272,
      "step": 41
    },
    {
      "epoch": 0.388551604509974,
      "grad_norm": 1.2141309976577759,
      "learning_rate": 1.602885682970026e-05,
      "loss": 7.9449,
      "step": 42
    },
    {
      "epoch": 0.3978028331887829,
      "grad_norm": 1.2382042407989502,
      "learning_rate": 1.464072663102903e-05,
      "loss": 8.078,
      "step": 43
    },
    {
      "epoch": 0.4070540618675918,
      "grad_norm": 1.2371668815612793,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 8.003,
      "step": 44
    },
    {
      "epoch": 0.4163052905464007,
      "grad_norm": 1.2782078981399536,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 8.1092,
      "step": 45
    },
    {
      "epoch": 0.4255565192252096,
      "grad_norm": 1.2752069234848022,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 7.9829,
      "step": 46
    },
    {
      "epoch": 0.4348077479040185,
      "grad_norm": 1.3277031183242798,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 8.0168,
      "step": 47
    },
    {
      "epoch": 0.4440589765828274,
      "grad_norm": 1.4168155193328857,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 8.0042,
      "step": 48
    },
    {
      "epoch": 0.4533102052616363,
      "grad_norm": 1.7632670402526855,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 8.095,
      "step": 49
    },
    {
      "epoch": 0.4625614339404452,
      "grad_norm": 3.518686532974243,
      "learning_rate": 1e-05,
      "loss": 8.2842,
      "step": 50
    },
    {
      "epoch": 0.4625614339404452,
      "eval_loss": 7.95767068862915,
      "eval_runtime": 0.7539,
      "eval_samples_per_second": 66.322,
      "eval_steps_per_second": 17.244,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 232340067975168.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}