{
  "best_metric": 1.697729229927063,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0,
  "eval_steps": 25,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 7.020181655883789,
      "learning_rate": 5e-05,
      "loss": 4.6816,
      "step": 1
    },
    {
      "epoch": 0.07142857142857142,
      "eval_loss": 9.965672492980957,
      "eval_runtime": 3.1129,
      "eval_samples_per_second": 30.518,
      "eval_steps_per_second": 3.855,
      "step": 1
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 15.591201782226562,
      "learning_rate": 0.0001,
      "loss": 7.9205,
      "step": 2
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 23.866371154785156,
      "learning_rate": 9.98458666866564e-05,
      "loss": 11.1902,
      "step": 3
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 7.000945091247559,
      "learning_rate": 9.938441702975689e-05,
      "loss": 3.8928,
      "step": 4
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 13.886146545410156,
      "learning_rate": 9.861849601988383e-05,
      "loss": 4.675,
      "step": 5
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 30.203248977661133,
      "learning_rate": 9.755282581475769e-05,
      "loss": 5.8457,
      "step": 6
    },
    {
      "epoch": 0.5,
      "grad_norm": 11.34516716003418,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.9694,
      "step": 7
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 15.420345306396484,
      "learning_rate": 9.45503262094184e-05,
      "loss": 2.7578,
      "step": 8
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 17.757953643798828,
      "learning_rate": 9.263200821770461e-05,
      "loss": 2.3079,
      "step": 9
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 2.0385148525238037,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.0236,
      "step": 10
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 4.7338433265686035,
      "learning_rate": 8.802029828000156e-05,
      "loss": 1.9669,
      "step": 11
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 5.90278434753418,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.9986,
      "step": 12
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 13.437071800231934,
      "learning_rate": 8.247240241650918e-05,
      "loss": 2.2254,
      "step": 13
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.956202030181885,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.8723,
      "step": 14
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 1.7068158388137817,
      "learning_rate": 7.612492823579745e-05,
      "loss": 1.746,
      "step": 15
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 3.6583316326141357,
      "learning_rate": 7.269952498697734e-05,
      "loss": 1.8691,
      "step": 16
    },
    {
      "epoch": 1.2142857142857142,
      "grad_norm": 4.012146472930908,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.9479,
      "step": 17
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 1.376846432685852,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.5925,
      "step": 18
    },
    {
      "epoch": 1.3571428571428572,
      "grad_norm": 3.3228981494903564,
      "learning_rate": 6.167226819279528e-05,
      "loss": 1.7874,
      "step": 19
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 2.1668920516967773,
      "learning_rate": 5.782172325201155e-05,
      "loss": 1.8105,
      "step": 20
    },
    {
      "epoch": 1.5,
      "grad_norm": 2.120370626449585,
      "learning_rate": 5.392295478639225e-05,
      "loss": 1.5832,
      "step": 21
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 2.284564971923828,
      "learning_rate": 5e-05,
      "loss": 1.7252,
      "step": 22
    },
    {
      "epoch": 1.6428571428571428,
      "grad_norm": 1.7141883373260498,
      "learning_rate": 4.607704521360776e-05,
      "loss": 1.7305,
      "step": 23
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 1.3919540643692017,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 1.579,
      "step": 24
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 1.5577592849731445,
      "learning_rate": 3.832773180720475e-05,
      "loss": 1.651,
      "step": 25
    },
    {
      "epoch": 1.7857142857142856,
      "eval_loss": 1.697729229927063,
      "eval_runtime": 3.1206,
      "eval_samples_per_second": 30.443,
      "eval_steps_per_second": 3.845,
      "step": 25
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 1.2910879850387573,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.6083,
      "step": 26
    },
    {
      "epoch": 1.9285714285714286,
      "grad_norm": 2.372886896133423,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.654,
      "step": 27
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.5181156396865845,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 1.5466,
      "step": 28
    },
    {
      "epoch": 2.0714285714285716,
      "grad_norm": 0.9630998969078064,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 1.4119,
      "step": 29
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 1.2351303100585938,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.5674,
      "step": 30
    },
    {
      "epoch": 2.2142857142857144,
      "grad_norm": 2.0758800506591797,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 1.5598,
      "step": 31
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 1.076073169708252,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.4146,
      "step": 32
    },
    {
      "epoch": 2.357142857142857,
      "grad_norm": 1.3192384243011475,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 1.5492,
      "step": 33
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 1.5484938621520996,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.5209,
      "step": 34
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.984063982963562,
      "learning_rate": 7.367991782295391e-06,
      "loss": 1.3867,
      "step": 35
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 1.18297278881073,
      "learning_rate": 5.449673790581611e-06,
      "loss": 1.5152,
      "step": 36
    },
    {
      "epoch": 2.642857142857143,
      "grad_norm": 1.3486729860305786,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.4508,
      "step": 37
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 1.0198802947998047,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 1.3897,
      "step": 38
    },
    {
      "epoch": 2.7857142857142856,
      "grad_norm": 0.9609850645065308,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 1.4722,
      "step": 39
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 1.0965021848678589,
      "learning_rate": 6.15582970243117e-07,
      "loss": 1.4413,
      "step": 40
    },
    {
      "epoch": 2.928571428571429,
      "grad_norm": 1.669950008392334,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 1.4793,
      "step": 41
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.8596750497817993,
      "learning_rate": 0.0,
      "loss": 1.4315,
      "step": 42
    }
  ],
  "logging_steps": 1,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.186562649353093e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}