{
  "best_metric": 1.2940391302108765,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.1605839416058394,
  "eval_steps": 25,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07785888077858881,
      "grad_norm": 1.1597152948379517,
      "learning_rate": 5e-05,
      "loss": 1.9417,
      "step": 1
    },
    {
      "epoch": 0.07785888077858881,
      "eval_loss": 2.3890843391418457,
      "eval_runtime": 0.9732,
      "eval_samples_per_second": 51.378,
      "eval_steps_per_second": 13.358,
      "step": 1
    },
    {
      "epoch": 0.15571776155717762,
      "grad_norm": 1.4161957502365112,
      "learning_rate": 0.0001,
      "loss": 2.156,
      "step": 2
    },
    {
      "epoch": 0.23357664233576642,
      "grad_norm": 2.1230530738830566,
      "learning_rate": 9.983788698441369e-05,
      "loss": 2.498,
      "step": 3
    },
    {
      "epoch": 0.31143552311435524,
      "grad_norm": 0.6756812930107117,
      "learning_rate": 9.935271596564688e-05,
      "loss": 1.8508,
      "step": 4
    },
    {
      "epoch": 0.38929440389294406,
      "grad_norm": 0.8647328019142151,
      "learning_rate": 9.854798261200746e-05,
      "loss": 1.9281,
      "step": 5
    },
    {
      "epoch": 0.46715328467153283,
      "grad_norm": 1.298797607421875,
      "learning_rate": 9.74294850457488e-05,
      "loss": 2.1033,
      "step": 6
    },
    {
      "epoch": 0.5450121654501217,
      "grad_norm": 0.7344125509262085,
      "learning_rate": 9.600528206746612e-05,
      "loss": 1.7068,
      "step": 7
    },
    {
      "epoch": 0.6228710462287105,
      "grad_norm": 1.1638606786727905,
      "learning_rate": 9.428563509225347e-05,
      "loss": 1.7034,
      "step": 8
    },
    {
      "epoch": 0.7007299270072993,
      "grad_norm": 1.5177010297775269,
      "learning_rate": 9.22829342159729e-05,
      "loss": 1.8685,
      "step": 9
    },
    {
      "epoch": 0.7785888077858881,
      "grad_norm": 0.8456931114196777,
      "learning_rate": 9.001160894432978e-05,
      "loss": 1.518,
      "step": 10
    },
    {
      "epoch": 0.8564476885644768,
      "grad_norm": 1.1930115222930908,
      "learning_rate": 8.74880242279536e-05,
      "loss": 1.569,
      "step": 11
    },
    {
      "epoch": 0.9343065693430657,
      "grad_norm": 1.5629873275756836,
      "learning_rate": 8.473036255255366e-05,
      "loss": 1.6445,
      "step": 12
    },
    {
      "epoch": 1.0535279805352797,
      "grad_norm": 1.1714144945144653,
      "learning_rate": 8.175849293369291e-05,
      "loss": 2.489,
      "step": 13
    },
    {
      "epoch": 1.1313868613138687,
      "grad_norm": 0.8845990300178528,
      "learning_rate": 7.859382776007543e-05,
      "loss": 1.3716,
      "step": 14
    },
    {
      "epoch": 1.2092457420924574,
      "grad_norm": 1.3474788665771484,
      "learning_rate": 7.525916851679529e-05,
      "loss": 1.4771,
      "step": 15
    },
    {
      "epoch": 1.287104622871046,
      "grad_norm": 1.5304101705551147,
      "learning_rate": 7.177854150011389e-05,
      "loss": 1.3313,
      "step": 16
    },
    {
      "epoch": 1.364963503649635,
      "grad_norm": 0.6706894040107727,
      "learning_rate": 6.817702470744477e-05,
      "loss": 1.2673,
      "step": 17
    },
    {
      "epoch": 1.442822384428224,
      "grad_norm": 0.7693089842796326,
      "learning_rate": 6.448056714980767e-05,
      "loss": 1.2907,
      "step": 18
    },
    {
      "epoch": 1.5206812652068127,
      "grad_norm": 0.6204941868782043,
      "learning_rate": 6.071580188860955e-05,
      "loss": 1.3099,
      "step": 19
    },
    {
      "epoch": 1.5985401459854014,
      "grad_norm": 0.5506797432899475,
      "learning_rate": 5.690985414382668e-05,
      "loss": 1.186,
      "step": 20
    },
    {
      "epoch": 1.67639902676399,
      "grad_norm": 0.7013995051383972,
      "learning_rate": 5.3090145856173346e-05,
      "loss": 1.3578,
      "step": 21
    },
    {
      "epoch": 1.754257907542579,
      "grad_norm": 0.5334713459014893,
      "learning_rate": 4.9284198111390456e-05,
      "loss": 1.1875,
      "step": 22
    },
    {
      "epoch": 1.832116788321168,
      "grad_norm": 0.6609035134315491,
      "learning_rate": 4.551943285019234e-05,
      "loss": 1.1562,
      "step": 23
    },
    {
      "epoch": 1.9099756690997567,
      "grad_norm": 0.723108172416687,
      "learning_rate": 4.182297529255525e-05,
      "loss": 1.22,
      "step": 24
    },
    {
      "epoch": 2.0291970802919708,
      "grad_norm": 0.9732814431190491,
      "learning_rate": 3.822145849988612e-05,
      "loss": 2.0469,
      "step": 25
    },
    {
      "epoch": 2.0291970802919708,
      "eval_loss": 1.2940391302108765,
      "eval_runtime": 0.9708,
      "eval_samples_per_second": 51.505,
      "eval_steps_per_second": 13.391,
      "step": 25
    },
    {
      "epoch": 2.1070559610705595,
      "grad_norm": 0.4368249177932739,
      "learning_rate": 3.474083148320469e-05,
      "loss": 1.0774,
      "step": 26
    },
    {
      "epoch": 2.1849148418491486,
      "grad_norm": 0.6278235912322998,
      "learning_rate": 3.1406172239924584e-05,
      "loss": 1.1516,
      "step": 27
    },
    {
      "epoch": 2.2627737226277373,
      "grad_norm": 0.5482242703437805,
      "learning_rate": 2.8241507066307104e-05,
      "loss": 1.0975,
      "step": 28
    },
    {
      "epoch": 2.340632603406326,
      "grad_norm": 0.45156601071357727,
      "learning_rate": 2.5269637447446348e-05,
      "loss": 1.0717,
      "step": 29
    },
    {
      "epoch": 2.4184914841849148,
      "grad_norm": 0.5456267595291138,
      "learning_rate": 2.2511975772046403e-05,
      "loss": 1.1172,
      "step": 30
    },
    {
      "epoch": 2.4963503649635035,
      "grad_norm": 0.5466234683990479,
      "learning_rate": 1.9988391055670233e-05,
      "loss": 1.1527,
      "step": 31
    },
    {
      "epoch": 2.574209245742092,
      "grad_norm": 0.4269847571849823,
      "learning_rate": 1.771706578402711e-05,
      "loss": 1.0926,
      "step": 32
    },
    {
      "epoch": 2.6520681265206814,
      "grad_norm": 0.5346320867538452,
      "learning_rate": 1.5714364907746536e-05,
      "loss": 1.0923,
      "step": 33
    },
    {
      "epoch": 2.72992700729927,
      "grad_norm": 0.5671536326408386,
      "learning_rate": 1.3994717932533891e-05,
      "loss": 1.1694,
      "step": 34
    },
    {
      "epoch": 2.807785888077859,
      "grad_norm": 0.4398534297943115,
      "learning_rate": 1.257051495425121e-05,
      "loss": 1.1125,
      "step": 35
    },
    {
      "epoch": 2.885644768856448,
      "grad_norm": 0.560420572757721,
      "learning_rate": 1.1452017387992552e-05,
      "loss": 1.1148,
      "step": 36
    },
    {
      "epoch": 3.004866180048662,
      "grad_norm": 0.9056612253189087,
      "learning_rate": 1.064728403435312e-05,
      "loss": 1.7632,
      "step": 37
    },
    {
      "epoch": 3.0827250608272507,
      "grad_norm": 0.3938903212547302,
      "learning_rate": 1.0162113015586309e-05,
      "loss": 1.144,
      "step": 38
    },
    {
      "epoch": 3.1605839416058394,
      "grad_norm": 0.48829081654548645,
      "learning_rate": 1e-05,
      "loss": 1.045,
      "step": 39
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.738816801115996e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|