{
  "best_metric": 3.1944468021392822,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.6920415224913494,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01384083044982699,
      "grad_norm": 0.7828583121299744,
      "learning_rate": 5e-05,
      "loss": 6.6306,
      "step": 1
    },
    {
      "epoch": 0.01384083044982699,
      "eval_loss": 6.805291175842285,
      "eval_runtime": 6.2044,
      "eval_samples_per_second": 78.493,
      "eval_steps_per_second": 9.832,
      "step": 1
    },
    {
      "epoch": 0.02768166089965398,
      "grad_norm": 0.900705099105835,
      "learning_rate": 0.0001,
      "loss": 6.4903,
      "step": 2
    },
    {
      "epoch": 0.04152249134948097,
      "grad_norm": 0.6845119595527649,
      "learning_rate": 9.989294616193017e-05,
      "loss": 6.5309,
      "step": 3
    },
    {
      "epoch": 0.05536332179930796,
      "grad_norm": 0.7316877245903015,
      "learning_rate": 9.957224306869053e-05,
      "loss": 6.4378,
      "step": 4
    },
    {
      "epoch": 0.06920415224913495,
      "grad_norm": 0.7706010937690735,
      "learning_rate": 9.903926402016153e-05,
      "loss": 6.408,
      "step": 5
    },
    {
      "epoch": 0.08304498269896193,
      "grad_norm": 0.9191716313362122,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.4436,
      "step": 6
    },
    {
      "epoch": 0.09688581314878893,
      "grad_norm": 0.9976472854614258,
      "learning_rate": 9.73465064747553e-05,
      "loss": 6.3941,
      "step": 7
    },
    {
      "epoch": 0.11072664359861592,
      "grad_norm": 1.2480906248092651,
      "learning_rate": 9.619397662556435e-05,
      "loss": 6.3564,
      "step": 8
    },
    {
      "epoch": 0.1245674740484429,
      "grad_norm": 1.514481782913208,
      "learning_rate": 9.484363707663442e-05,
      "loss": 6.3036,
      "step": 9
    },
    {
      "epoch": 0.1384083044982699,
      "grad_norm": 1.6174757480621338,
      "learning_rate": 9.330127018922194e-05,
      "loss": 6.1196,
      "step": 10
    },
    {
      "epoch": 0.1522491349480969,
      "grad_norm": 1.7518984079360962,
      "learning_rate": 9.157348061512727e-05,
      "loss": 6.1501,
      "step": 11
    },
    {
      "epoch": 0.16608996539792387,
      "grad_norm": 1.8451406955718994,
      "learning_rate": 8.966766701456177e-05,
      "loss": 5.8514,
      "step": 12
    },
    {
      "epoch": 0.17993079584775087,
      "grad_norm": 1.4596737623214722,
      "learning_rate": 8.759199037394887e-05,
      "loss": 5.8396,
      "step": 13
    },
    {
      "epoch": 0.19377162629757785,
      "grad_norm": 1.310354471206665,
      "learning_rate": 8.535533905932738e-05,
      "loss": 5.0548,
      "step": 14
    },
    {
      "epoch": 0.20761245674740483,
      "grad_norm": 1.2472784519195557,
      "learning_rate": 8.296729075500344e-05,
      "loss": 4.8912,
      "step": 15
    },
    {
      "epoch": 0.22145328719723184,
      "grad_norm": 1.3219356536865234,
      "learning_rate": 8.043807145043604e-05,
      "loss": 4.9125,
      "step": 16
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 1.4386686086654663,
      "learning_rate": 7.777851165098012e-05,
      "loss": 4.8531,
      "step": 17
    },
    {
      "epoch": 0.2491349480968858,
      "grad_norm": 1.4553345441818237,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.776,
      "step": 18
    },
    {
      "epoch": 0.2629757785467128,
      "grad_norm": 1.3721455335617065,
      "learning_rate": 7.211443451095007e-05,
      "loss": 4.5001,
      "step": 19
    },
    {
      "epoch": 0.2768166089965398,
      "grad_norm": 1.618059754371643,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.401,
      "step": 20
    },
    {
      "epoch": 0.2906574394463668,
      "grad_norm": 1.8476649522781372,
      "learning_rate": 6.607197326515808e-05,
      "loss": 4.1677,
      "step": 21
    },
    {
      "epoch": 0.3044982698961938,
      "grad_norm": 2.0024266242980957,
      "learning_rate": 6.294095225512603e-05,
      "loss": 4.3977,
      "step": 22
    },
    {
      "epoch": 0.31833910034602075,
      "grad_norm": 1.9315515756607056,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 4.2169,
      "step": 23
    },
    {
      "epoch": 0.33217993079584773,
      "grad_norm": 1.7556025981903076,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 3.9926,
      "step": 24
    },
    {
      "epoch": 0.3460207612456747,
      "grad_norm": 1.6433321237564087,
      "learning_rate": 5.327015646150716e-05,
      "loss": 4.3006,
      "step": 25
    },
    {
      "epoch": 0.3460207612456747,
      "eval_loss": 3.8810713291168213,
      "eval_runtime": 6.2008,
      "eval_samples_per_second": 78.538,
      "eval_steps_per_second": 9.837,
      "step": 25
    },
    {
      "epoch": 0.35986159169550175,
      "grad_norm": 1.2370139360427856,
      "learning_rate": 5e-05,
      "loss": 3.9109,
      "step": 26
    },
    {
      "epoch": 0.3737024221453287,
      "grad_norm": 1.0478001832962036,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 3.7508,
      "step": 27
    },
    {
      "epoch": 0.3875432525951557,
      "grad_norm": 0.9928375482559204,
      "learning_rate": 4.347369038899744e-05,
      "loss": 3.7824,
      "step": 28
    },
    {
      "epoch": 0.4013840830449827,
      "grad_norm": 0.9907777905464172,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 3.6651,
      "step": 29
    },
    {
      "epoch": 0.41522491349480967,
      "grad_norm": 1.028157114982605,
      "learning_rate": 3.705904774487396e-05,
      "loss": 3.5988,
      "step": 30
    },
    {
      "epoch": 0.4290657439446367,
      "grad_norm": 1.0831353664398193,
      "learning_rate": 3.392802673484193e-05,
      "loss": 3.4734,
      "step": 31
    },
    {
      "epoch": 0.4429065743944637,
      "grad_norm": 1.1880797147750854,
      "learning_rate": 3.086582838174551e-05,
      "loss": 3.4019,
      "step": 32
    },
    {
      "epoch": 0.45674740484429066,
      "grad_norm": 1.5488940477371216,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 3.5365,
      "step": 33
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 1.6649410724639893,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.6033,
      "step": 34
    },
    {
      "epoch": 0.4844290657439446,
      "grad_norm": 1.5827367305755615,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 3.4936,
      "step": 35
    },
    {
      "epoch": 0.4982698961937716,
      "grad_norm": 1.582950234413147,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 3.4003,
      "step": 36
    },
    {
      "epoch": 0.5121107266435986,
      "grad_norm": 1.4960726499557495,
      "learning_rate": 1.703270924499656e-05,
      "loss": 3.3835,
      "step": 37
    },
    {
      "epoch": 0.5259515570934256,
      "grad_norm": 1.2201118469238281,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.5908,
      "step": 38
    },
    {
      "epoch": 0.5397923875432526,
      "grad_norm": 1.18841552734375,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 3.1628,
      "step": 39
    },
    {
      "epoch": 0.5536332179930796,
      "grad_norm": 1.048747181892395,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 3.1171,
      "step": 40
    },
    {
      "epoch": 0.5674740484429066,
      "grad_norm": 0.901454508304596,
      "learning_rate": 8.426519384872733e-06,
      "loss": 3.2492,
      "step": 41
    },
    {
      "epoch": 0.5813148788927336,
      "grad_norm": 1.0152961015701294,
      "learning_rate": 6.698729810778065e-06,
      "loss": 3.132,
      "step": 42
    },
    {
      "epoch": 0.5951557093425606,
      "grad_norm": 0.9326971173286438,
      "learning_rate": 5.156362923365588e-06,
      "loss": 3.0336,
      "step": 43
    },
    {
      "epoch": 0.6089965397923875,
      "grad_norm": 0.9532666802406311,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 3.1209,
      "step": 44
    },
    {
      "epoch": 0.6228373702422145,
      "grad_norm": 1.1372768878936768,
      "learning_rate": 2.653493525244721e-06,
      "loss": 3.2997,
      "step": 45
    },
    {
      "epoch": 0.6366782006920415,
      "grad_norm": 1.4541966915130615,
      "learning_rate": 1.70370868554659e-06,
      "loss": 3.3793,
      "step": 46
    },
    {
      "epoch": 0.6505190311418685,
      "grad_norm": 1.5919541120529175,
      "learning_rate": 9.607359798384785e-07,
      "loss": 3.261,
      "step": 47
    },
    {
      "epoch": 0.6643598615916955,
      "grad_norm": 1.6075881719589233,
      "learning_rate": 4.277569313094809e-07,
      "loss": 3.3155,
      "step": 48
    },
    {
      "epoch": 0.6782006920415224,
      "grad_norm": 1.4852365255355835,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 3.0955,
      "step": 49
    },
    {
      "epoch": 0.6920415224913494,
      "grad_norm": 1.521230697631836,
      "learning_rate": 0.0,
      "loss": 3.5398,
      "step": 50
    },
    {
      "epoch": 0.6920415224913494,
      "eval_loss": 3.1944468021392822,
      "eval_runtime": 6.2035,
      "eval_samples_per_second": 78.504,
      "eval_steps_per_second": 9.833,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3004277953855488e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}