{
  "best_metric": 0.9395787715911865,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.016244314489928524,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003248862897985705,
      "grad_norm": 0.7047044038772583,
      "learning_rate": 0.0001,
      "loss": 1.9759,
      "step": 1
    },
    {
      "epoch": 0.0003248862897985705,
      "eval_loss": 2.686628580093384,
      "eval_runtime": 340.4839,
      "eval_samples_per_second": 3.809,
      "eval_steps_per_second": 1.906,
      "step": 1
    },
    {
      "epoch": 0.000649772579597141,
      "grad_norm": 0.8073049783706665,
      "learning_rate": 0.0002,
      "loss": 2.5715,
      "step": 2
    },
    {
      "epoch": 0.0009746588693957114,
      "grad_norm": 0.8195893168449402,
      "learning_rate": 0.00019978589232386035,
      "loss": 2.4832,
      "step": 3
    },
    {
      "epoch": 0.001299545159194282,
      "grad_norm": 1.178743839263916,
      "learning_rate": 0.00019914448613738106,
      "loss": 2.1465,
      "step": 4
    },
    {
      "epoch": 0.0016244314489928524,
      "grad_norm": 3.021273136138916,
      "learning_rate": 0.00019807852804032305,
      "loss": 2.0203,
      "step": 5
    },
    {
      "epoch": 0.001949317738791423,
      "grad_norm": 1.3877650499343872,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.8335,
      "step": 6
    },
    {
      "epoch": 0.0022742040285899934,
      "grad_norm": 1.4954441785812378,
      "learning_rate": 0.0001946930129495106,
      "loss": 1.8058,
      "step": 7
    },
    {
      "epoch": 0.002599090318388564,
      "grad_norm": 0.9874308705329895,
      "learning_rate": 0.0001923879532511287,
      "loss": 1.5395,
      "step": 8
    },
    {
      "epoch": 0.0029239766081871343,
      "grad_norm": 1.0399869680404663,
      "learning_rate": 0.00018968727415326884,
      "loss": 1.626,
      "step": 9
    },
    {
      "epoch": 0.003248862897985705,
      "grad_norm": 0.9299101829528809,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.5091,
      "step": 10
    },
    {
      "epoch": 0.0035737491877842753,
      "grad_norm": 0.9380367398262024,
      "learning_rate": 0.00018314696123025454,
      "loss": 1.4576,
      "step": 11
    },
    {
      "epoch": 0.003898635477582846,
      "grad_norm": 0.990161657333374,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.2058,
      "step": 12
    },
    {
      "epoch": 0.004223521767381416,
      "grad_norm": 1.0648465156555176,
      "learning_rate": 0.00017518398074789775,
      "loss": 1.0389,
      "step": 13
    },
    {
      "epoch": 0.004548408057179987,
      "grad_norm": 0.7876304388046265,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.0257,
      "step": 14
    },
    {
      "epoch": 0.004873294346978557,
      "grad_norm": 0.7823423743247986,
      "learning_rate": 0.00016593458151000688,
      "loss": 1.294,
      "step": 15
    },
    {
      "epoch": 0.005198180636777128,
      "grad_norm": 1.018497109413147,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.1339,
      "step": 16
    },
    {
      "epoch": 0.005523066926575698,
      "grad_norm": 0.8195908069610596,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.943,
      "step": 17
    },
    {
      "epoch": 0.005847953216374269,
      "grad_norm": 0.914745032787323,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.0052,
      "step": 18
    },
    {
      "epoch": 0.006172839506172839,
      "grad_norm": 1.1211453676223755,
      "learning_rate": 0.00014422886902190014,
      "loss": 1.1446,
      "step": 19
    },
    {
      "epoch": 0.00649772579597141,
      "grad_norm": 1.2552735805511475,
      "learning_rate": 0.000138268343236509,
      "loss": 1.1098,
      "step": 20
    },
    {
      "epoch": 0.00682261208576998,
      "grad_norm": 0.9171386957168579,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.9569,
      "step": 21
    },
    {
      "epoch": 0.007147498375568551,
      "grad_norm": 0.934035062789917,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.955,
      "step": 22
    },
    {
      "epoch": 0.007472384665367121,
      "grad_norm": 1.086276888847351,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.9235,
      "step": 23
    },
    {
      "epoch": 0.007797270955165692,
      "grad_norm": 1.0728775262832642,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.8594,
      "step": 24
    },
    {
      "epoch": 0.008122157244964262,
      "grad_norm": 1.2129361629486084,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.9563,
      "step": 25
    },
    {
      "epoch": 0.008122157244964262,
      "eval_loss": 0.9595276117324829,
      "eval_runtime": 342.6456,
      "eval_samples_per_second": 3.785,
      "eval_steps_per_second": 1.894,
      "step": 25
    },
    {
      "epoch": 0.008447043534762833,
      "grad_norm": 1.1689411401748657,
      "learning_rate": 0.0001,
      "loss": 0.9271,
      "step": 26
    },
    {
      "epoch": 0.008771929824561403,
      "grad_norm": 0.98142409324646,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.7725,
      "step": 27
    },
    {
      "epoch": 0.009096816114359974,
      "grad_norm": 0.855100154876709,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.8737,
      "step": 28
    },
    {
      "epoch": 0.009421702404158544,
      "grad_norm": 0.7963764667510986,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.7265,
      "step": 29
    },
    {
      "epoch": 0.009746588693957114,
      "grad_norm": 0.6895178556442261,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.8324,
      "step": 30
    },
    {
      "epoch": 0.010071474983755685,
      "grad_norm": 0.8792585730552673,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.7426,
      "step": 31
    },
    {
      "epoch": 0.010396361273554255,
      "grad_norm": 0.7640400528907776,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.8364,
      "step": 32
    },
    {
      "epoch": 0.010721247563352826,
      "grad_norm": 0.717138409614563,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.8579,
      "step": 33
    },
    {
      "epoch": 0.011046133853151396,
      "grad_norm": 0.6296213269233704,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.7416,
      "step": 34
    },
    {
      "epoch": 0.011371020142949967,
      "grad_norm": 0.6830035448074341,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.793,
      "step": 35
    },
    {
      "epoch": 0.011695906432748537,
      "grad_norm": 0.866948664188385,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.7564,
      "step": 36
    },
    {
      "epoch": 0.012020792722547108,
      "grad_norm": 0.7266378998756409,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.9034,
      "step": 37
    },
    {
      "epoch": 0.012345679012345678,
      "grad_norm": 0.752968966960907,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.8902,
      "step": 38
    },
    {
      "epoch": 0.012670565302144249,
      "grad_norm": 0.6887612342834473,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.7917,
      "step": 39
    },
    {
      "epoch": 0.01299545159194282,
      "grad_norm": 0.913896918296814,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.7918,
      "step": 40
    },
    {
      "epoch": 0.01332033788174139,
      "grad_norm": 0.7052801847457886,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.6793,
      "step": 41
    },
    {
      "epoch": 0.01364522417153996,
      "grad_norm": 0.6854836940765381,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.7743,
      "step": 42
    },
    {
      "epoch": 0.01397011046133853,
      "grad_norm": 0.6507396697998047,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.8359,
      "step": 43
    },
    {
      "epoch": 0.014294996751137101,
      "grad_norm": 0.7426648139953613,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.8788,
      "step": 44
    },
    {
      "epoch": 0.014619883040935672,
      "grad_norm": 0.569381058216095,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.7171,
      "step": 45
    },
    {
      "epoch": 0.014944769330734242,
      "grad_norm": 0.7846092581748962,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.7299,
      "step": 46
    },
    {
      "epoch": 0.015269655620532813,
      "grad_norm": 0.6389809250831604,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.7145,
      "step": 47
    },
    {
      "epoch": 0.015594541910331383,
      "grad_norm": 0.5611962676048279,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.6854,
      "step": 48
    },
    {
      "epoch": 0.015919428200129954,
      "grad_norm": 0.8515998125076294,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.7188,
      "step": 49
    },
    {
      "epoch": 0.016244314489928524,
      "grad_norm": 1.1273244619369507,
      "learning_rate": 0.0,
      "loss": 0.8396,
      "step": 50
    },
    {
      "epoch": 0.016244314489928524,
      "eval_loss": 0.9395787715911865,
      "eval_runtime": 342.6187,
      "eval_samples_per_second": 3.786,
      "eval_steps_per_second": 1.894,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.53464644354048e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}