{
  "best_metric": 0.19360065460205078,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.023736055067647758,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00047472110135295516,
      "grad_norm": 0.4688243269920349,
      "learning_rate": 0.0001,
      "loss": 0.6124,
      "step": 1
    },
    {
      "epoch": 0.00047472110135295516,
      "eval_loss": 1.0957424640655518,
      "eval_runtime": 194.003,
      "eval_samples_per_second": 18.288,
      "eval_steps_per_second": 2.289,
      "step": 1
    },
    {
      "epoch": 0.0009494422027059103,
      "grad_norm": 0.6601790189743042,
      "learning_rate": 0.0002,
      "loss": 0.8464,
      "step": 2
    },
    {
      "epoch": 0.0014241633040588653,
      "grad_norm": 0.7652206420898438,
      "learning_rate": 0.00019978589232386035,
      "loss": 0.965,
      "step": 3
    },
    {
      "epoch": 0.0018988844054118206,
      "grad_norm": 0.850256085395813,
      "learning_rate": 0.00019914448613738106,
      "loss": 0.6595,
      "step": 4
    },
    {
      "epoch": 0.0023736055067647755,
      "grad_norm": 1.034886360168457,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.4754,
      "step": 5
    },
    {
      "epoch": 0.0028483266081177306,
      "grad_norm": 0.9273011684417725,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.3089,
      "step": 6
    },
    {
      "epoch": 0.003323047709470686,
      "grad_norm": 1.7200816869735718,
      "learning_rate": 0.0001946930129495106,
      "loss": 0.3123,
      "step": 7
    },
    {
      "epoch": 0.0037977688108236413,
      "grad_norm": 0.935492753982544,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.2688,
      "step": 8
    },
    {
      "epoch": 0.004272489912176596,
      "grad_norm": 0.5538306832313538,
      "learning_rate": 0.00018968727415326884,
      "loss": 0.2015,
      "step": 9
    },
    {
      "epoch": 0.004747211013529551,
      "grad_norm": 0.5412841439247131,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.2237,
      "step": 10
    },
    {
      "epoch": 0.005221932114882507,
      "grad_norm": 0.9080474972724915,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.1714,
      "step": 11
    },
    {
      "epoch": 0.005696653216235461,
      "grad_norm": 0.5930872559547424,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.25,
      "step": 12
    },
    {
      "epoch": 0.006171374317588417,
      "grad_norm": 0.8284171223640442,
      "learning_rate": 0.00017518398074789775,
      "loss": 0.3136,
      "step": 13
    },
    {
      "epoch": 0.006646095418941372,
      "grad_norm": 0.541205883026123,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.2078,
      "step": 14
    },
    {
      "epoch": 0.007120816520294327,
      "grad_norm": 0.3955840468406677,
      "learning_rate": 0.00016593458151000688,
      "loss": 0.1586,
      "step": 15
    },
    {
      "epoch": 0.0075955376216472826,
      "grad_norm": 0.48258212208747864,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.2005,
      "step": 16
    },
    {
      "epoch": 0.008070258723000238,
      "grad_norm": 0.39275145530700684,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.1783,
      "step": 17
    },
    {
      "epoch": 0.008544979824353193,
      "grad_norm": 0.3785333037376404,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1541,
      "step": 18
    },
    {
      "epoch": 0.009019700925706147,
      "grad_norm": 0.594031035900116,
      "learning_rate": 0.00014422886902190014,
      "loss": 0.2355,
      "step": 19
    },
    {
      "epoch": 0.009494422027059102,
      "grad_norm": 0.4767218232154846,
      "learning_rate": 0.000138268343236509,
      "loss": 0.2854,
      "step": 20
    },
    {
      "epoch": 0.009969143128412059,
      "grad_norm": 0.4265936315059662,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.2417,
      "step": 21
    },
    {
      "epoch": 0.010443864229765013,
      "grad_norm": 0.37528008222579956,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.1375,
      "step": 22
    },
    {
      "epoch": 0.010918585331117968,
      "grad_norm": 0.47274863719940186,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.2293,
      "step": 23
    },
    {
      "epoch": 0.011393306432470923,
      "grad_norm": 0.5729778409004211,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.2553,
      "step": 24
    },
    {
      "epoch": 0.011868027533823879,
      "grad_norm": 0.5274404287338257,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.2306,
      "step": 25
    },
    {
      "epoch": 0.011868027533823879,
      "eval_loss": 0.19939672946929932,
      "eval_runtime": 194.6731,
      "eval_samples_per_second": 18.225,
      "eval_steps_per_second": 2.281,
      "step": 25
    },
    {
      "epoch": 0.012342748635176834,
      "grad_norm": 0.3555451035499573,
      "learning_rate": 0.0001,
      "loss": 0.1971,
      "step": 26
    },
    {
      "epoch": 0.012817469736529788,
      "grad_norm": 0.34671762585639954,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.1603,
      "step": 27
    },
    {
      "epoch": 0.013292190837882745,
      "grad_norm": 0.20731107890605927,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.1453,
      "step": 28
    },
    {
      "epoch": 0.0137669119392357,
      "grad_norm": 0.3372092545032501,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.2641,
      "step": 29
    },
    {
      "epoch": 0.014241633040588654,
      "grad_norm": 0.32797935605049133,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.1899,
      "step": 30
    },
    {
      "epoch": 0.014716354141941609,
      "grad_norm": 0.3118788003921509,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.1824,
      "step": 31
    },
    {
      "epoch": 0.015191075243294565,
      "grad_norm": 0.366529643535614,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.1616,
      "step": 32
    },
    {
      "epoch": 0.015665796344647518,
      "grad_norm": 0.46039462089538574,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.2017,
      "step": 33
    },
    {
      "epoch": 0.016140517446000476,
      "grad_norm": 0.3713008165359497,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.1935,
      "step": 34
    },
    {
      "epoch": 0.01661523854735343,
      "grad_norm": 0.38472941517829895,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.2168,
      "step": 35
    },
    {
      "epoch": 0.017089959648706386,
      "grad_norm": 0.44644981622695923,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.2307,
      "step": 36
    },
    {
      "epoch": 0.01756468075005934,
      "grad_norm": 0.5147837996482849,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.251,
      "step": 37
    },
    {
      "epoch": 0.018039401851412295,
      "grad_norm": 0.3486250340938568,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.176,
      "step": 38
    },
    {
      "epoch": 0.01851412295276525,
      "grad_norm": 0.3016928732395172,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.2325,
      "step": 39
    },
    {
      "epoch": 0.018988844054118204,
      "grad_norm": 0.3231501877307892,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.2517,
      "step": 40
    },
    {
      "epoch": 0.019463565155471162,
      "grad_norm": 0.325752854347229,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.1717,
      "step": 41
    },
    {
      "epoch": 0.019938286256824117,
      "grad_norm": 0.2623179852962494,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.1396,
      "step": 42
    },
    {
      "epoch": 0.02041300735817707,
      "grad_norm": 0.3840673565864563,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.2494,
      "step": 43
    },
    {
      "epoch": 0.020887728459530026,
      "grad_norm": 0.4204588234424591,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.2365,
      "step": 44
    },
    {
      "epoch": 0.02136244956088298,
      "grad_norm": 0.29817184805870056,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.208,
      "step": 45
    },
    {
      "epoch": 0.021837170662235936,
      "grad_norm": 0.4304487705230713,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.2959,
      "step": 46
    },
    {
      "epoch": 0.02231189176358889,
      "grad_norm": 0.361851304769516,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.136,
      "step": 47
    },
    {
      "epoch": 0.022786612864941845,
      "grad_norm": 0.46964964270591736,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.2349,
      "step": 48
    },
    {
      "epoch": 0.023261333966294803,
      "grad_norm": 0.6576246619224548,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.272,
      "step": 49
    },
    {
      "epoch": 0.023736055067647758,
      "grad_norm": 0.5070633292198181,
      "learning_rate": 0.0,
      "loss": 0.1968,
      "step": 50
    },
    {
      "epoch": 0.023736055067647758,
      "eval_loss": 0.19360065460205078,
      "eval_runtime": 194.3616,
      "eval_samples_per_second": 18.255,
      "eval_steps_per_second": 2.284,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.796154706198528e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}