{
  "best_metric": 1.5668654441833496,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.40526849037487334,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008105369807497468,
      "grad_norm": 7.904720783233643,
      "learning_rate": 5e-05,
      "loss": 2.4502,
      "step": 1
    },
    {
      "epoch": 0.008105369807497468,
      "eval_loss": 3.2280056476593018,
      "eval_runtime": 23.8854,
      "eval_samples_per_second": 34.791,
      "eval_steps_per_second": 4.354,
      "step": 1
    },
    {
      "epoch": 0.016210739614994935,
      "grad_norm": 8.790853500366211,
      "learning_rate": 0.0001,
      "loss": 2.439,
      "step": 2
    },
    {
      "epoch": 0.0243161094224924,
      "grad_norm": 4.56343412399292,
      "learning_rate": 9.989294616193017e-05,
      "loss": 2.4184,
      "step": 3
    },
    {
      "epoch": 0.03242147922998987,
      "grad_norm": 4.2467732429504395,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.0382,
      "step": 4
    },
    {
      "epoch": 0.040526849037487336,
      "grad_norm": 1.8644218444824219,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.8382,
      "step": 5
    },
    {
      "epoch": 0.0486322188449848,
      "grad_norm": 1.9568532705307007,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.8509,
      "step": 6
    },
    {
      "epoch": 0.05673758865248227,
      "grad_norm": 1.110546350479126,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.8251,
      "step": 7
    },
    {
      "epoch": 0.06484295845997974,
      "grad_norm": 1.1750296354293823,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.9507,
      "step": 8
    },
    {
      "epoch": 0.0729483282674772,
      "grad_norm": 1.0463156700134277,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.9353,
      "step": 9
    },
    {
      "epoch": 0.08105369807497467,
      "grad_norm": 0.6957851648330688,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.0441,
      "step": 10
    },
    {
      "epoch": 0.08915906788247213,
      "grad_norm": 1.9994052648544312,
      "learning_rate": 9.157348061512727e-05,
      "loss": 2.0345,
      "step": 11
    },
    {
      "epoch": 0.0972644376899696,
      "grad_norm": 1.3627257347106934,
      "learning_rate": 8.966766701456177e-05,
      "loss": 2.3265,
      "step": 12
    },
    {
      "epoch": 0.10536980749746708,
      "grad_norm": 0.2764863073825836,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.5045,
      "step": 13
    },
    {
      "epoch": 0.11347517730496454,
      "grad_norm": 0.2644999921321869,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.392,
      "step": 14
    },
    {
      "epoch": 0.12158054711246201,
      "grad_norm": 0.22837163507938385,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.2861,
      "step": 15
    },
    {
      "epoch": 0.12968591691995948,
      "grad_norm": 0.22888338565826416,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.2934,
      "step": 16
    },
    {
      "epoch": 0.13779128672745694,
      "grad_norm": 0.21393026411533356,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.3441,
      "step": 17
    },
    {
      "epoch": 0.1458966565349544,
      "grad_norm": 0.17430530488491058,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3398,
      "step": 18
    },
    {
      "epoch": 0.1540020263424519,
      "grad_norm": 0.21894441545009613,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.4612,
      "step": 19
    },
    {
      "epoch": 0.16210739614994935,
      "grad_norm": 0.2172875702381134,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.5441,
      "step": 20
    },
    {
      "epoch": 0.1702127659574468,
      "grad_norm": 0.22415199875831604,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.6691,
      "step": 21
    },
    {
      "epoch": 0.17831813576494426,
      "grad_norm": 0.33313316106796265,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.763,
      "step": 22
    },
    {
      "epoch": 0.18642350557244175,
      "grad_norm": 0.26398777961730957,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.873,
      "step": 23
    },
    {
      "epoch": 0.1945288753799392,
      "grad_norm": 0.3155348300933838,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 2.0692,
      "step": 24
    },
    {
      "epoch": 0.20263424518743667,
      "grad_norm": 0.5277456641197205,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.3044,
      "step": 25
    },
    {
      "epoch": 0.20263424518743667,
      "eval_loss": 1.598935842514038,
      "eval_runtime": 23.8955,
      "eval_samples_per_second": 34.776,
      "eval_steps_per_second": 4.352,
      "step": 25
    },
    {
      "epoch": 0.21073961499493415,
      "grad_norm": 0.250800222158432,
      "learning_rate": 5e-05,
      "loss": 1.4023,
      "step": 26
    },
    {
      "epoch": 0.2188449848024316,
      "grad_norm": 0.24198980629444122,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.3362,
      "step": 27
    },
    {
      "epoch": 0.22695035460992907,
      "grad_norm": 0.18812240660190582,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.3514,
      "step": 28
    },
    {
      "epoch": 0.23505572441742653,
      "grad_norm": 0.2894032597541809,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.2495,
      "step": 29
    },
    {
      "epoch": 0.24316109422492402,
      "grad_norm": 0.21906958520412445,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.444,
      "step": 30
    },
    {
      "epoch": 0.2512664640324215,
      "grad_norm": 0.2103070467710495,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.5269,
      "step": 31
    },
    {
      "epoch": 0.25937183383991896,
      "grad_norm": 0.2812809646129608,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.4841,
      "step": 32
    },
    {
      "epoch": 0.2674772036474164,
      "grad_norm": 0.24371977150440216,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.664,
      "step": 33
    },
    {
      "epoch": 0.2755825734549139,
      "grad_norm": 0.26612022519111633,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.6387,
      "step": 34
    },
    {
      "epoch": 0.28368794326241137,
      "grad_norm": 0.21530385315418243,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.7245,
      "step": 35
    },
    {
      "epoch": 0.2917933130699088,
      "grad_norm": 0.2958863377571106,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.9309,
      "step": 36
    },
    {
      "epoch": 0.2998986828774063,
      "grad_norm": 0.4526556730270386,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.8786,
      "step": 37
    },
    {
      "epoch": 0.3080040526849038,
      "grad_norm": 0.14817576110363007,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.4305,
      "step": 38
    },
    {
      "epoch": 0.3161094224924012,
      "grad_norm": 0.12831979990005493,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.2881,
      "step": 39
    },
    {
      "epoch": 0.3242147922998987,
      "grad_norm": 0.15420038998126984,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.2267,
      "step": 40
    },
    {
      "epoch": 0.3323201621073962,
      "grad_norm": 0.12769393622875214,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.2373,
      "step": 41
    },
    {
      "epoch": 0.3404255319148936,
      "grad_norm": 0.13905879855155945,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.3497,
      "step": 42
    },
    {
      "epoch": 0.3485309017223911,
      "grad_norm": 0.13880909979343414,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.3607,
      "step": 43
    },
    {
      "epoch": 0.3566362715298885,
      "grad_norm": 0.12389617413282394,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.4539,
      "step": 44
    },
    {
      "epoch": 0.364741641337386,
      "grad_norm": 0.1297137439250946,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.5299,
      "step": 45
    },
    {
      "epoch": 0.3728470111448835,
      "grad_norm": 0.16556890308856964,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.6069,
      "step": 46
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.15601517260074615,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.6373,
      "step": 47
    },
    {
      "epoch": 0.3890577507598784,
      "grad_norm": 0.16503740847110748,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.7971,
      "step": 48
    },
    {
      "epoch": 0.3971631205673759,
      "grad_norm": 0.21679052710533142,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.9231,
      "step": 49
    },
    {
      "epoch": 0.40526849037487334,
      "grad_norm": 0.39398708939552307,
      "learning_rate": 0.0,
      "loss": 2.1675,
      "step": 50
    },
    {
      "epoch": 0.40526849037487334,
      "eval_loss": 1.5668654441833496,
      "eval_runtime": 23.9003,
      "eval_samples_per_second": 34.769,
      "eval_steps_per_second": 4.351,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.305284264198144e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}