{
  "best_metric": 2.7374379634857178,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.6872852233676976,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013745704467353952,
      "grad_norm": 0.633126974105835,
      "learning_rate": 5e-05,
      "loss": 3.7941,
      "step": 1
    },
    {
      "epoch": 0.013745704467353952,
      "eval_loss": 3.7618861198425293,
      "eval_runtime": 30.5394,
      "eval_samples_per_second": 16.045,
      "eval_steps_per_second": 2.03,
      "step": 1
    },
    {
      "epoch": 0.027491408934707903,
      "grad_norm": 0.654931902885437,
      "learning_rate": 0.0001,
      "loss": 3.7007,
      "step": 2
    },
    {
      "epoch": 0.041237113402061855,
      "grad_norm": 0.6610121726989746,
      "learning_rate": 9.989294616193017e-05,
      "loss": 3.8049,
      "step": 3
    },
    {
      "epoch": 0.054982817869415807,
      "grad_norm": 0.6573833227157593,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.6173,
      "step": 4
    },
    {
      "epoch": 0.06872852233676977,
      "grad_norm": 0.6862820982933044,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.5289,
      "step": 5
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 0.6219794750213623,
      "learning_rate": 9.829629131445342e-05,
      "loss": 3.4455,
      "step": 6
    },
    {
      "epoch": 0.09621993127147767,
      "grad_norm": 0.6177584528923035,
      "learning_rate": 9.73465064747553e-05,
      "loss": 3.3436,
      "step": 7
    },
    {
      "epoch": 0.10996563573883161,
      "grad_norm": 0.5731457471847534,
      "learning_rate": 9.619397662556435e-05,
      "loss": 3.2447,
      "step": 8
    },
    {
      "epoch": 0.12371134020618557,
      "grad_norm": 0.6419684290885925,
      "learning_rate": 9.484363707663442e-05,
      "loss": 3.2408,
      "step": 9
    },
    {
      "epoch": 0.13745704467353953,
      "grad_norm": 0.6953072547912598,
      "learning_rate": 9.330127018922194e-05,
      "loss": 3.0887,
      "step": 10
    },
    {
      "epoch": 0.15120274914089346,
      "grad_norm": 0.8918637037277222,
      "learning_rate": 9.157348061512727e-05,
      "loss": 3.1175,
      "step": 11
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 1.198569655418396,
      "learning_rate": 8.966766701456177e-05,
      "loss": 3.2409,
      "step": 12
    },
    {
      "epoch": 0.17869415807560138,
      "grad_norm": 0.6394647359848022,
      "learning_rate": 8.759199037394887e-05,
      "loss": 3.2072,
      "step": 13
    },
    {
      "epoch": 0.19243986254295534,
      "grad_norm": 0.7178822755813599,
      "learning_rate": 8.535533905932738e-05,
      "loss": 3.1186,
      "step": 14
    },
    {
      "epoch": 0.20618556701030927,
      "grad_norm": 0.7316906452178955,
      "learning_rate": 8.296729075500344e-05,
      "loss": 3.1448,
      "step": 15
    },
    {
      "epoch": 0.21993127147766323,
      "grad_norm": 0.7377943992614746,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.9385,
      "step": 16
    },
    {
      "epoch": 0.23367697594501718,
      "grad_norm": 0.646327793598175,
      "learning_rate": 7.777851165098012e-05,
      "loss": 3.0327,
      "step": 17
    },
    {
      "epoch": 0.24742268041237114,
      "grad_norm": 0.5574729442596436,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.0255,
      "step": 18
    },
    {
      "epoch": 0.2611683848797251,
      "grad_norm": 0.5094675421714783,
      "learning_rate": 7.211443451095007e-05,
      "loss": 2.9431,
      "step": 19
    },
    {
      "epoch": 0.27491408934707906,
      "grad_norm": 0.4401124119758606,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.8594,
      "step": 20
    },
    {
      "epoch": 0.28865979381443296,
      "grad_norm": 0.4236370325088501,
      "learning_rate": 6.607197326515808e-05,
      "loss": 2.8503,
      "step": 21
    },
    {
      "epoch": 0.3024054982817869,
      "grad_norm": 0.48745620250701904,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.9523,
      "step": 22
    },
    {
      "epoch": 0.3161512027491409,
      "grad_norm": 0.5854716300964355,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 2.8143,
      "step": 23
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": 0.7844096422195435,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 2.6956,
      "step": 24
    },
    {
      "epoch": 0.3436426116838488,
      "grad_norm": 1.3506336212158203,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.992,
      "step": 25
    },
    {
      "epoch": 0.3436426116838488,
      "eval_loss": 2.8326058387756348,
      "eval_runtime": 30.509,
      "eval_samples_per_second": 16.061,
      "eval_steps_per_second": 2.032,
      "step": 25
    },
    {
      "epoch": 0.35738831615120276,
      "grad_norm": 0.5904625654220581,
      "learning_rate": 5e-05,
      "loss": 3.0126,
      "step": 26
    },
    {
      "epoch": 0.3711340206185567,
      "grad_norm": 0.6415864825248718,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.9682,
      "step": 27
    },
    {
      "epoch": 0.3848797250859107,
      "grad_norm": 0.5782291889190674,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.9303,
      "step": 28
    },
    {
      "epoch": 0.39862542955326463,
      "grad_norm": 0.5609015822410583,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 2.9249,
      "step": 29
    },
    {
      "epoch": 0.41237113402061853,
      "grad_norm": 0.48684611916542053,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.8978,
      "step": 30
    },
    {
      "epoch": 0.4261168384879725,
      "grad_norm": 0.45882076025009155,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.8614,
      "step": 31
    },
    {
      "epoch": 0.43986254295532645,
      "grad_norm": 0.40644049644470215,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.7954,
      "step": 32
    },
    {
      "epoch": 0.4536082474226804,
      "grad_norm": 0.4016781449317932,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 2.7111,
      "step": 33
    },
    {
      "epoch": 0.46735395189003437,
      "grad_norm": 0.45421966910362244,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.7871,
      "step": 34
    },
    {
      "epoch": 0.48109965635738833,
      "grad_norm": 0.48663410544395447,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.7124,
      "step": 35
    },
    {
      "epoch": 0.4948453608247423,
      "grad_norm": 0.6618165373802185,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.921,
      "step": 36
    },
    {
      "epoch": 0.5085910652920962,
      "grad_norm": 0.9185125231742859,
      "learning_rate": 1.703270924499656e-05,
      "loss": 2.6715,
      "step": 37
    },
    {
      "epoch": 0.5223367697594502,
      "grad_norm": 0.3857472538948059,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.971,
      "step": 38
    },
    {
      "epoch": 0.5360824742268041,
      "grad_norm": 0.3716031312942505,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.9631,
      "step": 39
    },
    {
      "epoch": 0.5498281786941581,
      "grad_norm": 0.4130760133266449,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.9389,
      "step": 40
    },
    {
      "epoch": 0.563573883161512,
      "grad_norm": 0.37855178117752075,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.8596,
      "step": 41
    },
    {
      "epoch": 0.5773195876288659,
      "grad_norm": 0.3730044364929199,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.8893,
      "step": 42
    },
    {
      "epoch": 0.5910652920962199,
      "grad_norm": 0.399122953414917,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.7885,
      "step": 43
    },
    {
      "epoch": 0.6048109965635738,
      "grad_norm": 0.38850271701812744,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.8442,
      "step": 44
    },
    {
      "epoch": 0.6185567010309279,
      "grad_norm": 0.41439515352249146,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.652,
      "step": 45
    },
    {
      "epoch": 0.6323024054982818,
      "grad_norm": 0.41602662205696106,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.6787,
      "step": 46
    },
    {
      "epoch": 0.6460481099656358,
      "grad_norm": 0.4596008360385895,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.7676,
      "step": 47
    },
    {
      "epoch": 0.6597938144329897,
      "grad_norm": 0.4760560691356659,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.6652,
      "step": 48
    },
    {
      "epoch": 0.6735395189003437,
      "grad_norm": 0.6931067109107971,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.59,
      "step": 49
    },
    {
      "epoch": 0.6872852233676976,
      "grad_norm": 1.223355770111084,
      "learning_rate": 0.0,
      "loss": 2.7209,
      "step": 50
    },
    {
      "epoch": 0.6872852233676976,
      "eval_loss": 2.7374379634857178,
      "eval_runtime": 30.5331,
      "eval_samples_per_second": 16.048,
      "eval_steps_per_second": 2.031,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.624057810649088e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}