{
  "best_metric": 1.246433138847351,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.5012531328320802,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 57.262550354003906,
      "learning_rate": 5e-05,
      "loss": 29.8366,
      "step": 1
    },
    {
      "epoch": 0.020050125313283207,
      "eval_loss": 1.9029018878936768,
      "eval_runtime": 9.9628,
      "eval_samples_per_second": 33.826,
      "eval_steps_per_second": 4.316,
      "step": 1
    },
    {
      "epoch": 0.040100250626566414,
      "grad_norm": 61.9777946472168,
      "learning_rate": 0.0001,
      "loss": 30.5166,
      "step": 2
    },
    {
      "epoch": 0.06015037593984962,
      "grad_norm": 41.373321533203125,
      "learning_rate": 9.989294616193017e-05,
      "loss": 28.2141,
      "step": 3
    },
    {
      "epoch": 0.08020050125313283,
      "grad_norm": 35.427249908447266,
      "learning_rate": 9.957224306869053e-05,
      "loss": 25.9472,
      "step": 4
    },
    {
      "epoch": 0.10025062656641603,
      "grad_norm": 32.058868408203125,
      "learning_rate": 9.903926402016153e-05,
      "loss": 25.0425,
      "step": 5
    },
    {
      "epoch": 0.12030075187969924,
      "grad_norm": 31.39280891418457,
      "learning_rate": 9.829629131445342e-05,
      "loss": 22.7109,
      "step": 6
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 27.022315979003906,
      "learning_rate": 9.73465064747553e-05,
      "loss": 22.0858,
      "step": 7
    },
    {
      "epoch": 0.16040100250626566,
      "grad_norm": 22.689998626708984,
      "learning_rate": 9.619397662556435e-05,
      "loss": 21.8407,
      "step": 8
    },
    {
      "epoch": 0.18045112781954886,
      "grad_norm": 23.095903396606445,
      "learning_rate": 9.484363707663442e-05,
      "loss": 21.4493,
      "step": 9
    },
    {
      "epoch": 0.20050125313283207,
      "grad_norm": 15.283258438110352,
      "learning_rate": 9.330127018922194e-05,
      "loss": 20.7956,
      "step": 10
    },
    {
      "epoch": 0.22055137844611528,
      "grad_norm": 19.957977294921875,
      "learning_rate": 9.157348061512727e-05,
      "loss": 20.4935,
      "step": 11
    },
    {
      "epoch": 0.24060150375939848,
      "grad_norm": 14.825860023498535,
      "learning_rate": 8.966766701456177e-05,
      "loss": 21.3076,
      "step": 12
    },
    {
      "epoch": 0.2606516290726817,
      "grad_norm": 32.27811813354492,
      "learning_rate": 8.759199037394887e-05,
      "loss": 22.9758,
      "step": 13
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 14.461236000061035,
      "learning_rate": 8.535533905932738e-05,
      "loss": 21.7338,
      "step": 14
    },
    {
      "epoch": 0.3007518796992481,
      "grad_norm": 13.161033630371094,
      "learning_rate": 8.296729075500344e-05,
      "loss": 21.4966,
      "step": 15
    },
    {
      "epoch": 0.3208020050125313,
      "grad_norm": 13.530344009399414,
      "learning_rate": 8.043807145043604e-05,
      "loss": 20.3507,
      "step": 16
    },
    {
      "epoch": 0.3408521303258145,
      "grad_norm": 15.065783500671387,
      "learning_rate": 7.777851165098012e-05,
      "loss": 19.967,
      "step": 17
    },
    {
      "epoch": 0.3609022556390977,
      "grad_norm": 10.283108711242676,
      "learning_rate": 7.500000000000001e-05,
      "loss": 20.2849,
      "step": 18
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 11.738165855407715,
      "learning_rate": 7.211443451095007e-05,
      "loss": 19.5285,
      "step": 19
    },
    {
      "epoch": 0.40100250626566414,
      "grad_norm": 12.6289701461792,
      "learning_rate": 6.91341716182545e-05,
      "loss": 19.7657,
      "step": 20
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 11.915717124938965,
      "learning_rate": 6.607197326515808e-05,
      "loss": 19.1391,
      "step": 21
    },
    {
      "epoch": 0.44110275689223055,
      "grad_norm": 12.708892822265625,
      "learning_rate": 6.294095225512603e-05,
      "loss": 18.9334,
      "step": 22
    },
    {
      "epoch": 0.46115288220551376,
      "grad_norm": 11.339106559753418,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 19.3876,
      "step": 23
    },
    {
      "epoch": 0.48120300751879697,
      "grad_norm": 13.785265922546387,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 19.19,
      "step": 24
    },
    {
      "epoch": 0.5012531328320802,
      "grad_norm": 13.614476203918457,
      "learning_rate": 5.327015646150716e-05,
      "loss": 20.1636,
      "step": 25
    },
    {
      "epoch": 0.5012531328320802,
      "eval_loss": 1.246433138847351,
      "eval_runtime": 9.9398,
      "eval_samples_per_second": 33.904,
      "eval_steps_per_second": 4.326,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.829013890367488e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}