{
"best_metric": 0.003693485399708152,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.042280318511732785,
"eval_steps": 50,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005637375801564371,
"grad_norm": 2.1648833751678467,
"learning_rate": 5e-06,
"loss": 0.419,
"step": 1
},
{
"epoch": 0.0005637375801564371,
"eval_loss": 0.4619485139846802,
"eval_runtime": 483.5609,
"eval_samples_per_second": 6.179,
"eval_steps_per_second": 1.545,
"step": 1
},
{
"epoch": 0.0011274751603128743,
"grad_norm": 2.5114240646362305,
"learning_rate": 1e-05,
"loss": 0.4315,
"step": 2
},
{
"epoch": 0.0016912127404693116,
"grad_norm": 2.378593683242798,
"learning_rate": 1.5e-05,
"loss": 0.4334,
"step": 3
},
{
"epoch": 0.0022549503206257485,
"grad_norm": 2.1998493671417236,
"learning_rate": 2e-05,
"loss": 0.4171,
"step": 4
},
{
"epoch": 0.002818687900782186,
"grad_norm": 2.156386613845825,
"learning_rate": 2.5e-05,
"loss": 0.3483,
"step": 5
},
{
"epoch": 0.0033824254809386232,
"grad_norm": 1.2216063737869263,
"learning_rate": 3e-05,
"loss": 0.2282,
"step": 6
},
{
"epoch": 0.00394616306109506,
"grad_norm": 0.9967670440673828,
"learning_rate": 3.5e-05,
"loss": 0.189,
"step": 7
},
{
"epoch": 0.004509900641251497,
"grad_norm": 1.1083791255950928,
"learning_rate": 4e-05,
"loss": 0.1456,
"step": 8
},
{
"epoch": 0.005073638221407935,
"grad_norm": 0.6914762258529663,
"learning_rate": 4.5e-05,
"loss": 0.1457,
"step": 9
},
{
"epoch": 0.005637375801564372,
"grad_norm": 0.7410008907318115,
"learning_rate": 5e-05,
"loss": 0.0622,
"step": 10
},
{
"epoch": 0.006201113381720809,
"grad_norm": 1.2113826274871826,
"learning_rate": 5.500000000000001e-05,
"loss": 0.1035,
"step": 11
},
{
"epoch": 0.0067648509618772465,
"grad_norm": 0.8289437890052795,
"learning_rate": 6e-05,
"loss": 0.0723,
"step": 12
},
{
"epoch": 0.007328588542033683,
"grad_norm": 1.0702934265136719,
"learning_rate": 6.500000000000001e-05,
"loss": 0.0522,
"step": 13
},
{
"epoch": 0.00789232612219012,
"grad_norm": 0.6274738311767578,
"learning_rate": 7e-05,
"loss": 0.0412,
"step": 14
},
{
"epoch": 0.008456063702346557,
"grad_norm": 0.4598556160926819,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0206,
"step": 15
},
{
"epoch": 0.009019801282502994,
"grad_norm": 0.5833584666252136,
"learning_rate": 8e-05,
"loss": 0.0287,
"step": 16
},
{
"epoch": 0.009583538862659433,
"grad_norm": 0.7033435702323914,
"learning_rate": 8.5e-05,
"loss": 0.0215,
"step": 17
},
{
"epoch": 0.01014727644281587,
"grad_norm": 0.3314254581928253,
"learning_rate": 9e-05,
"loss": 0.0282,
"step": 18
},
{
"epoch": 0.010711014022972306,
"grad_norm": 0.2847822308540344,
"learning_rate": 9.5e-05,
"loss": 0.0117,
"step": 19
},
{
"epoch": 0.011274751603128744,
"grad_norm": 0.16592615842819214,
"learning_rate": 0.0001,
"loss": 0.0063,
"step": 20
},
{
"epoch": 0.011838489183285181,
"grad_norm": 0.28353503346443176,
"learning_rate": 9.991845519630678e-05,
"loss": 0.0141,
"step": 21
},
{
"epoch": 0.012402226763441618,
"grad_norm": 0.5575234293937683,
"learning_rate": 9.967408676742751e-05,
"loss": 0.0176,
"step": 22
},
{
"epoch": 0.012965964343598054,
"grad_norm": 0.6154017448425293,
"learning_rate": 9.926769179238466e-05,
"loss": 0.0159,
"step": 23
},
{
"epoch": 0.013529701923754493,
"grad_norm": 0.1513437181711197,
"learning_rate": 9.870059584711668e-05,
"loss": 0.0053,
"step": 24
},
{
"epoch": 0.01409343950391093,
"grad_norm": 0.24261268973350525,
"learning_rate": 9.797464868072488e-05,
"loss": 0.0081,
"step": 25
},
{
"epoch": 0.014657177084067366,
"grad_norm": 0.09996633231639862,
"learning_rate": 9.709221818197624e-05,
"loss": 0.0042,
"step": 26
},
{
"epoch": 0.015220914664223803,
"grad_norm": 0.09420917183160782,
"learning_rate": 9.60561826557425e-05,
"loss": 0.0065,
"step": 27
},
{
"epoch": 0.01578465224438024,
"grad_norm": 0.15072062611579895,
"learning_rate": 9.486992143456792e-05,
"loss": 0.0045,
"step": 28
},
{
"epoch": 0.01634838982453668,
"grad_norm": 0.28533950448036194,
"learning_rate": 9.353730385598887e-05,
"loss": 0.0139,
"step": 29
},
{
"epoch": 0.016912127404693115,
"grad_norm": 0.30663344264030457,
"learning_rate": 9.206267664155907e-05,
"loss": 0.0063,
"step": 30
},
{
"epoch": 0.017475864984849553,
"grad_norm": 0.09105496853590012,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0037,
"step": 31
},
{
"epoch": 0.018039602565005988,
"grad_norm": 0.1351594626903534,
"learning_rate": 8.870708053195413e-05,
"loss": 0.0046,
"step": 32
},
{
"epoch": 0.018603340145162427,
"grad_norm": 0.10735020041465759,
"learning_rate": 8.683705689382024e-05,
"loss": 0.0031,
"step": 33
},
{
"epoch": 0.019167077725318865,
"grad_norm": 0.20175467431545258,
"learning_rate": 8.484687843276469e-05,
"loss": 0.0043,
"step": 34
},
{
"epoch": 0.0197308153054753,
"grad_norm": 0.19858188927173615,
"learning_rate": 8.274303669726426e-05,
"loss": 0.0068,
"step": 35
},
{
"epoch": 0.02029455288563174,
"grad_norm": 0.2514883279800415,
"learning_rate": 8.053239398177191e-05,
"loss": 0.0055,
"step": 36
},
{
"epoch": 0.020858290465788177,
"grad_norm": 0.18291513621807098,
"learning_rate": 7.822216094333847e-05,
"loss": 0.0026,
"step": 37
},
{
"epoch": 0.021422028045944612,
"grad_norm": 0.2146664708852768,
"learning_rate": 7.58198730819481e-05,
"loss": 0.0073,
"step": 38
},
{
"epoch": 0.02198576562610105,
"grad_norm": 0.10978978872299194,
"learning_rate": 7.333336616128369e-05,
"loss": 0.0053,
"step": 39
},
{
"epoch": 0.02254950320625749,
"grad_norm": 0.11510439962148666,
"learning_rate": 7.077075065009433e-05,
"loss": 0.0055,
"step": 40
},
{
"epoch": 0.023113240786413924,
"grad_norm": 0.28216826915740967,
"learning_rate": 6.814038526753205e-05,
"loss": 0.0068,
"step": 41
},
{
"epoch": 0.023676978366570362,
"grad_norm": 0.07864489406347275,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0025,
"step": 42
},
{
"epoch": 0.024240715946726797,
"grad_norm": 0.16902923583984375,
"learning_rate": 6.271091670967436e-05,
"loss": 0.0059,
"step": 43
},
{
"epoch": 0.024804453526883236,
"grad_norm": 0.2910400927066803,
"learning_rate": 5.992952333228728e-05,
"loss": 0.0067,
"step": 44
},
{
"epoch": 0.025368191107039674,
"grad_norm": 0.09773421287536621,
"learning_rate": 5.7115741913664264e-05,
"loss": 0.0025,
"step": 45
},
{
"epoch": 0.02593192868719611,
"grad_norm": 0.4094434678554535,
"learning_rate": 5.427875042394199e-05,
"loss": 0.0079,
"step": 46
},
{
"epoch": 0.026495666267352547,
"grad_norm": 8.514568328857422,
"learning_rate": 5.142780253968481e-05,
"loss": 0.0685,
"step": 47
},
{
"epoch": 0.027059403847508986,
"grad_norm": 0.21161220967769623,
"learning_rate": 4.85721974603152e-05,
"loss": 0.0037,
"step": 48
},
{
"epoch": 0.02762314142766542,
"grad_norm": 0.07243302464485168,
"learning_rate": 4.5721249576058027e-05,
"loss": 0.0016,
"step": 49
},
{
"epoch": 0.02818687900782186,
"grad_norm": 0.09567948430776596,
"learning_rate": 4.288425808633575e-05,
"loss": 0.005,
"step": 50
},
{
"epoch": 0.02818687900782186,
"eval_loss": 0.003693485399708152,
"eval_runtime": 486.3662,
"eval_samples_per_second": 6.144,
"eval_steps_per_second": 1.536,
"step": 50
},
{
"epoch": 0.028750616587978298,
"grad_norm": 0.1225830614566803,
"learning_rate": 4.007047666771274e-05,
"loss": 0.0021,
"step": 51
},
{
"epoch": 0.029314354168134733,
"grad_norm": 0.2588897943496704,
"learning_rate": 3.728908329032567e-05,
"loss": 0.0056,
"step": 52
},
{
"epoch": 0.02987809174829117,
"grad_norm": 0.032464101910591125,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0014,
"step": 53
},
{
"epoch": 0.030441829328447606,
"grad_norm": 0.06320462375879288,
"learning_rate": 3.1859614732467954e-05,
"loss": 0.0015,
"step": 54
},
{
"epoch": 0.031005566908604044,
"grad_norm": 0.09139032661914825,
"learning_rate": 2.9229249349905684e-05,
"loss": 0.0038,
"step": 55
},
{
"epoch": 0.03156930448876048,
"grad_norm": 0.21807141602039337,
"learning_rate": 2.6666633838716314e-05,
"loss": 0.0058,
"step": 56
},
{
"epoch": 0.03213304206891692,
"grad_norm": 0.044844482094049454,
"learning_rate": 2.418012691805191e-05,
"loss": 0.0015,
"step": 57
},
{
"epoch": 0.03269677964907336,
"grad_norm": 0.411823034286499,
"learning_rate": 2.1777839056661554e-05,
"loss": 0.0509,
"step": 58
},
{
"epoch": 0.03326051722922979,
"grad_norm": 0.08910378813743591,
"learning_rate": 1.946760601822809e-05,
"loss": 0.0028,
"step": 59
},
{
"epoch": 0.03382425480938623,
"grad_norm": 0.02365848794579506,
"learning_rate": 1.725696330273575e-05,
"loss": 0.0012,
"step": 60
},
{
"epoch": 0.03438799238954267,
"grad_norm": 0.07241960614919662,
"learning_rate": 1.5153121567235335e-05,
"loss": 0.002,
"step": 61
},
{
"epoch": 0.03495172996969911,
"grad_norm": 0.12868086993694305,
"learning_rate": 1.3162943106179749e-05,
"loss": 0.0068,
"step": 62
},
{
"epoch": 0.035515467549855545,
"grad_norm": 0.2509536147117615,
"learning_rate": 1.1292919468045877e-05,
"loss": 0.0093,
"step": 63
},
{
"epoch": 0.036079205130011976,
"grad_norm": 0.16926199197769165,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0135,
"step": 64
},
{
"epoch": 0.036642942710168415,
"grad_norm": 0.06538938730955124,
"learning_rate": 7.937323358440935e-06,
"loss": 0.0022,
"step": 65
},
{
"epoch": 0.03720668029032485,
"grad_norm": 0.03876176103949547,
"learning_rate": 6.462696144011149e-06,
"loss": 0.0015,
"step": 66
},
{
"epoch": 0.03777041787048129,
"grad_norm": 0.05735520273447037,
"learning_rate": 5.13007856543209e-06,
"loss": 0.0021,
"step": 67
},
{
"epoch": 0.03833415545063773,
"grad_norm": 0.04489413648843765,
"learning_rate": 3.9438173442575e-06,
"loss": 0.0018,
"step": 68
},
{
"epoch": 0.03889789303079417,
"grad_norm": 0.05411110073328018,
"learning_rate": 2.9077818180237693e-06,
"loss": 0.0019,
"step": 69
},
{
"epoch": 0.0394616306109506,
"grad_norm": 0.13937705755233765,
"learning_rate": 2.0253513192751373e-06,
"loss": 0.0019,
"step": 70
},
{
"epoch": 0.04002536819110704,
"grad_norm": 0.11924191564321518,
"learning_rate": 1.2994041528833266e-06,
"loss": 0.0035,
"step": 71
},
{
"epoch": 0.04058910577126348,
"grad_norm": 0.08436208218336105,
"learning_rate": 7.323082076153509e-07,
"loss": 0.003,
"step": 72
},
{
"epoch": 0.041152843351419915,
"grad_norm": 0.36739325523376465,
"learning_rate": 3.2591323257248893e-07,
"loss": 0.0077,
"step": 73
},
{
"epoch": 0.041716580931576354,
"grad_norm": 0.10039687901735306,
"learning_rate": 8.15448036932176e-08,
"loss": 0.0047,
"step": 74
},
{
"epoch": 0.042280318511732785,
"grad_norm": 0.1667713224887848,
"learning_rate": 0.0,
"loss": 0.006,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.2703129360125133e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}