{
  "best_metric": 2.1023528575897217,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.029682398337785694,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003957653111704759,
      "grad_norm": 17.165529251098633,
      "learning_rate": 5e-06,
      "loss": 9.5056,
      "step": 1
    },
    {
      "epoch": 0.0003957653111704759,
      "eval_loss": 9.463391304016113,
      "eval_runtime": 165.7321,
      "eval_samples_per_second": 51.354,
      "eval_steps_per_second": 12.84,
      "step": 1
    },
    {
      "epoch": 0.0007915306223409518,
      "grad_norm": 16.710681915283203,
      "learning_rate": 1e-05,
      "loss": 9.513,
      "step": 2
    },
    {
      "epoch": 0.0011872959335114278,
      "grad_norm": 16.190683364868164,
      "learning_rate": 1.5e-05,
      "loss": 9.4406,
      "step": 3
    },
    {
      "epoch": 0.0015830612446819036,
      "grad_norm": 17.282331466674805,
      "learning_rate": 2e-05,
      "loss": 9.4612,
      "step": 4
    },
    {
      "epoch": 0.0019788265558523797,
      "grad_norm": 17.428613662719727,
      "learning_rate": 2.5e-05,
      "loss": 9.3862,
      "step": 5
    },
    {
      "epoch": 0.0023745918670228555,
      "grad_norm": 18.054773330688477,
      "learning_rate": 3e-05,
      "loss": 9.2646,
      "step": 6
    },
    {
      "epoch": 0.0027703571781933314,
      "grad_norm": 17.870378494262695,
      "learning_rate": 3.5e-05,
      "loss": 9.0348,
      "step": 7
    },
    {
      "epoch": 0.0031661224893638072,
      "grad_norm": 17.9739990234375,
      "learning_rate": 4e-05,
      "loss": 8.7023,
      "step": 8
    },
    {
      "epoch": 0.003561887800534283,
      "grad_norm": 17.747678756713867,
      "learning_rate": 4.5e-05,
      "loss": 8.653,
      "step": 9
    },
    {
      "epoch": 0.003957653111704759,
      "grad_norm": 18.454126358032227,
      "learning_rate": 5e-05,
      "loss": 8.3382,
      "step": 10
    },
    {
      "epoch": 0.004353418422875235,
      "grad_norm": 17.509685516357422,
      "learning_rate": 5.500000000000001e-05,
      "loss": 8.0545,
      "step": 11
    },
    {
      "epoch": 0.004749183734045711,
      "grad_norm": 17.798004150390625,
      "learning_rate": 6e-05,
      "loss": 7.7362,
      "step": 12
    },
    {
      "epoch": 0.005144949045216187,
      "grad_norm": 15.242817878723145,
      "learning_rate": 6.500000000000001e-05,
      "loss": 7.4385,
      "step": 13
    },
    {
      "epoch": 0.005540714356386663,
      "grad_norm": 13.07628059387207,
      "learning_rate": 7e-05,
      "loss": 7.1798,
      "step": 14
    },
    {
      "epoch": 0.005936479667557139,
      "grad_norm": 11.167773246765137,
      "learning_rate": 7.500000000000001e-05,
      "loss": 6.9803,
      "step": 15
    },
    {
      "epoch": 0.0063322449787276145,
      "grad_norm": 10.047318458557129,
      "learning_rate": 8e-05,
      "loss": 6.7496,
      "step": 16
    },
    {
      "epoch": 0.00672801028989809,
      "grad_norm": 10.476768493652344,
      "learning_rate": 8.5e-05,
      "loss": 6.5605,
      "step": 17
    },
    {
      "epoch": 0.007123775601068566,
      "grad_norm": 10.307733535766602,
      "learning_rate": 9e-05,
      "loss": 6.2643,
      "step": 18
    },
    {
      "epoch": 0.007519540912239042,
      "grad_norm": 10.599945068359375,
      "learning_rate": 9.5e-05,
      "loss": 6.1297,
      "step": 19
    },
    {
      "epoch": 0.007915306223409519,
      "grad_norm": 10.505680084228516,
      "learning_rate": 0.0001,
      "loss": 5.8766,
      "step": 20
    },
    {
      "epoch": 0.008311071534579995,
      "grad_norm": 9.522797584533691,
      "learning_rate": 9.991845519630678e-05,
      "loss": 5.626,
      "step": 21
    },
    {
      "epoch": 0.00870683684575047,
      "grad_norm": 9.296674728393555,
      "learning_rate": 9.967408676742751e-05,
      "loss": 5.3456,
      "step": 22
    },
    {
      "epoch": 0.009102602156920946,
      "grad_norm": 8.635791778564453,
      "learning_rate": 9.926769179238466e-05,
      "loss": 5.1444,
      "step": 23
    },
    {
      "epoch": 0.009498367468091422,
      "grad_norm": 8.5535888671875,
      "learning_rate": 9.870059584711668e-05,
      "loss": 4.9605,
      "step": 24
    },
    {
      "epoch": 0.009894132779261898,
      "grad_norm": 8.049251556396484,
      "learning_rate": 9.797464868072488e-05,
      "loss": 4.8101,
      "step": 25
    },
    {
      "epoch": 0.010289898090432374,
      "grad_norm": 8.621903419494629,
      "learning_rate": 9.709221818197624e-05,
      "loss": 4.5989,
      "step": 26
    },
    {
      "epoch": 0.01068566340160285,
      "grad_norm": 8.04345703125,
      "learning_rate": 9.60561826557425e-05,
      "loss": 4.4125,
      "step": 27
    },
    {
      "epoch": 0.011081428712773326,
      "grad_norm": 7.404324054718018,
      "learning_rate": 9.486992143456792e-05,
      "loss": 4.2554,
      "step": 28
    },
    {
      "epoch": 0.011477194023943801,
      "grad_norm": 7.918259620666504,
      "learning_rate": 9.353730385598887e-05,
      "loss": 4.0894,
      "step": 29
    },
    {
      "epoch": 0.011872959335114277,
      "grad_norm": 7.785238742828369,
      "learning_rate": 9.206267664155907e-05,
      "loss": 3.9766,
      "step": 30
    },
    {
      "epoch": 0.012268724646284753,
      "grad_norm": 7.892620086669922,
      "learning_rate": 9.045084971874738e-05,
      "loss": 3.806,
      "step": 31
    },
    {
      "epoch": 0.012664489957455229,
      "grad_norm": 6.364659786224365,
      "learning_rate": 8.870708053195413e-05,
      "loss": 3.6896,
      "step": 32
    },
    {
      "epoch": 0.013060255268625705,
      "grad_norm": 6.582086086273193,
      "learning_rate": 8.683705689382024e-05,
      "loss": 3.5419,
      "step": 33
    },
    {
      "epoch": 0.01345602057979618,
      "grad_norm": 6.028378963470459,
      "learning_rate": 8.484687843276469e-05,
      "loss": 3.3398,
      "step": 34
    },
    {
      "epoch": 0.013851785890966656,
      "grad_norm": 6.743649005889893,
      "learning_rate": 8.274303669726426e-05,
      "loss": 3.2613,
      "step": 35
    },
    {
      "epoch": 0.014247551202137132,
      "grad_norm": 5.946102142333984,
      "learning_rate": 8.053239398177191e-05,
      "loss": 3.1901,
      "step": 36
    },
    {
      "epoch": 0.014643316513307608,
      "grad_norm": 5.685393333435059,
      "learning_rate": 7.822216094333847e-05,
      "loss": 3.0342,
      "step": 37
    },
    {
      "epoch": 0.015039081824478084,
      "grad_norm": 5.397143363952637,
      "learning_rate": 7.58198730819481e-05,
      "loss": 2.9706,
      "step": 38
    },
    {
      "epoch": 0.01543484713564856,
      "grad_norm": 5.114358425140381,
      "learning_rate": 7.333336616128369e-05,
      "loss": 2.8392,
      "step": 39
    },
    {
      "epoch": 0.015830612446819037,
      "grad_norm": 4.752023696899414,
      "learning_rate": 7.077075065009433e-05,
      "loss": 2.7408,
      "step": 40
    },
    {
      "epoch": 0.01622637775798951,
      "grad_norm": 4.810094356536865,
      "learning_rate": 6.814038526753205e-05,
      "loss": 2.6399,
      "step": 41
    },
    {
      "epoch": 0.01662214306915999,
      "grad_norm": 4.728804111480713,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.4988,
      "step": 42
    },
    {
      "epoch": 0.017017908380330463,
      "grad_norm": 4.380035400390625,
      "learning_rate": 6.271091670967436e-05,
      "loss": 2.4686,
      "step": 43
    },
    {
      "epoch": 0.01741367369150094,
      "grad_norm": 4.446783542633057,
      "learning_rate": 5.992952333228728e-05,
      "loss": 2.4156,
      "step": 44
    },
    {
      "epoch": 0.017809439002671415,
      "grad_norm": 4.009081840515137,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 2.3587,
      "step": 45
    },
    {
      "epoch": 0.018205204313841893,
      "grad_norm": 3.3682971000671387,
      "learning_rate": 5.427875042394199e-05,
      "loss": 2.2072,
      "step": 46
    },
    {
      "epoch": 0.018600969625012367,
      "grad_norm": 3.4791717529296875,
      "learning_rate": 5.142780253968481e-05,
      "loss": 2.1792,
      "step": 47
    },
    {
      "epoch": 0.018996734936182844,
      "grad_norm": 3.7284488677978516,
      "learning_rate": 4.85721974603152e-05,
      "loss": 2.2048,
      "step": 48
    },
    {
      "epoch": 0.01939250024735332,
      "grad_norm": 2.6523303985595703,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 2.1867,
      "step": 49
    },
    {
      "epoch": 0.019788265558523796,
      "grad_norm": 2.7477378845214844,
      "learning_rate": 4.288425808633575e-05,
      "loss": 2.1158,
      "step": 50
    },
    {
      "epoch": 0.019788265558523796,
      "eval_loss": 2.1023528575897217,
      "eval_runtime": 166.1256,
      "eval_samples_per_second": 51.232,
      "eval_steps_per_second": 12.81,
      "step": 50
    },
    {
      "epoch": 0.02018403086969427,
      "grad_norm": 2.083672046661377,
      "learning_rate": 4.007047666771274e-05,
      "loss": 2.1264,
      "step": 51
    },
    {
      "epoch": 0.020579796180864748,
      "grad_norm": 2.1195905208587646,
      "learning_rate": 3.728908329032567e-05,
      "loss": 2.0814,
      "step": 52
    },
    {
      "epoch": 0.020975561492035222,
      "grad_norm": 2.336852550506592,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.0431,
      "step": 53
    },
    {
      "epoch": 0.0213713268032057,
      "grad_norm": 2.270529270172119,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 2.0404,
      "step": 54
    },
    {
      "epoch": 0.021767092114376174,
      "grad_norm": 1.912915587425232,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 2.0207,
      "step": 55
    },
    {
      "epoch": 0.02216285742554665,
      "grad_norm": 2.281681537628174,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 2.067,
      "step": 56
    },
    {
      "epoch": 0.022558622736717125,
      "grad_norm": 1.9110603332519531,
      "learning_rate": 2.418012691805191e-05,
      "loss": 1.9778,
      "step": 57
    },
    {
      "epoch": 0.022954388047887603,
      "grad_norm": 1.8000211715698242,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 1.9394,
      "step": 58
    },
    {
      "epoch": 0.023350153359058077,
      "grad_norm": 1.7174650430679321,
      "learning_rate": 1.946760601822809e-05,
      "loss": 1.9967,
      "step": 59
    },
    {
      "epoch": 0.023745918670228554,
      "grad_norm": 2.0586373805999756,
      "learning_rate": 1.725696330273575e-05,
      "loss": 1.9891,
      "step": 60
    },
    {
      "epoch": 0.024141683981399032,
      "grad_norm": 2.5579705238342285,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 1.9442,
      "step": 61
    },
    {
      "epoch": 0.024537449292569506,
      "grad_norm": 1.8055744171142578,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 1.9219,
      "step": 62
    },
    {
      "epoch": 0.024933214603739984,
      "grad_norm": 1.803011417388916,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 2.0183,
      "step": 63
    },
    {
      "epoch": 0.025328979914910458,
      "grad_norm": 1.7811270952224731,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.8916,
      "step": 64
    },
    {
      "epoch": 0.025724745226080935,
      "grad_norm": 1.733910322189331,
      "learning_rate": 7.937323358440935e-06,
      "loss": 1.9905,
      "step": 65
    },
    {
      "epoch": 0.02612051053725141,
      "grad_norm": 1.70806086063385,
      "learning_rate": 6.462696144011149e-06,
      "loss": 1.8904,
      "step": 66
    },
    {
      "epoch": 0.026516275848421887,
      "grad_norm": 1.6646728515625,
      "learning_rate": 5.13007856543209e-06,
      "loss": 1.9227,
      "step": 67
    },
    {
      "epoch": 0.02691204115959236,
      "grad_norm": 1.7067934274673462,
      "learning_rate": 3.9438173442575e-06,
      "loss": 1.9334,
      "step": 68
    },
    {
      "epoch": 0.02730780647076284,
      "grad_norm": 1.6381385326385498,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 1.9143,
      "step": 69
    },
    {
      "epoch": 0.027703571781933313,
      "grad_norm": 1.9795570373535156,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 1.9075,
      "step": 70
    },
    {
      "epoch": 0.02809933709310379,
      "grad_norm": 1.563312292098999,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 1.907,
      "step": 71
    },
    {
      "epoch": 0.028495102404274265,
      "grad_norm": 1.734750747680664,
      "learning_rate": 7.323082076153509e-07,
      "loss": 1.8587,
      "step": 72
    },
    {
      "epoch": 0.028890867715444742,
      "grad_norm": 1.636612892150879,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 1.9035,
      "step": 73
    },
    {
      "epoch": 0.029286633026615216,
      "grad_norm": 2.093644380569458,
      "learning_rate": 8.15448036932176e-08,
      "loss": 1.9695,
      "step": 74
    },
    {
      "epoch": 0.029682398337785694,
      "grad_norm": 1.7379897832870483,
      "learning_rate": 0.0,
      "loss": 1.8646,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 196236804096000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}