{
  "best_metric": 10.37548828125,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 3.0409356725146197,
  "eval_steps": 25,
  "global_step": 65,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04678362573099415,
      "grad_norm": 0.08035193383693695,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 10.3946,
      "step": 1
    },
    {
      "epoch": 0.04678362573099415,
      "eval_loss": 10.393576622009277,
      "eval_runtime": 0.0768,
      "eval_samples_per_second": 650.798,
      "eval_steps_per_second": 169.208,
      "step": 1
    },
    {
      "epoch": 0.0935672514619883,
      "grad_norm": 0.07648925483226776,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.3947,
      "step": 2
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 0.07350781559944153,
      "learning_rate": 0.0001,
      "loss": 10.3923,
      "step": 3
    },
    {
      "epoch": 0.1871345029239766,
      "grad_norm": 0.08065179735422134,
      "learning_rate": 9.994224282269737e-05,
      "loss": 10.3843,
      "step": 4
    },
    {
      "epoch": 0.23391812865497075,
      "grad_norm": 0.10446092486381531,
      "learning_rate": 9.976911955263529e-05,
      "loss": 10.3794,
      "step": 5
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 0.0764802023768425,
      "learning_rate": 9.948107459476501e-05,
      "loss": 10.3948,
      "step": 6
    },
    {
      "epoch": 0.32748538011695905,
      "grad_norm": 0.07530075311660767,
      "learning_rate": 9.907884735636226e-05,
      "loss": 10.3885,
      "step": 7
    },
    {
      "epoch": 0.3742690058479532,
      "grad_norm": 0.07579008489847183,
      "learning_rate": 9.856347034897919e-05,
      "loss": 10.3891,
      "step": 8
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 0.07757099717855453,
      "learning_rate": 9.793626653800219e-05,
      "loss": 10.3861,
      "step": 9
    },
    {
      "epoch": 0.4678362573099415,
      "grad_norm": 0.10095006227493286,
      "learning_rate": 9.719884594661864e-05,
      "loss": 10.3798,
      "step": 10
    },
    {
      "epoch": 0.5146198830409356,
      "grad_norm": 0.084345243871212,
      "learning_rate": 9.635310152291039e-05,
      "loss": 10.3809,
      "step": 11
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.07810067385435104,
      "learning_rate": 9.540120428068338e-05,
      "loss": 10.3871,
      "step": 12
    },
    {
      "epoch": 0.6081871345029239,
      "grad_norm": 0.07608596980571747,
      "learning_rate": 9.43455977265062e-05,
      "loss": 10.3897,
      "step": 13
    },
    {
      "epoch": 0.6549707602339181,
      "grad_norm": 0.08369778096675873,
      "learning_rate": 9.31889915872638e-05,
      "loss": 10.3836,
      "step": 14
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.08858703821897507,
      "learning_rate": 9.193435485432745e-05,
      "loss": 10.3823,
      "step": 15
    },
    {
      "epoch": 0.7485380116959064,
      "grad_norm": 0.09585383534431458,
      "learning_rate": 9.058490816219643e-05,
      "loss": 10.378,
      "step": 16
    },
    {
      "epoch": 0.7953216374269005,
      "grad_norm": 0.08524016290903091,
      "learning_rate": 8.914411552117559e-05,
      "loss": 10.3886,
      "step": 17
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.08070208132266998,
      "learning_rate": 8.76156754253104e-05,
      "loss": 10.3806,
      "step": 18
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.08314181119203568,
      "learning_rate": 8.600351135840589e-05,
      "loss": 10.3807,
      "step": 19
    },
    {
      "epoch": 0.935672514619883,
      "grad_norm": 0.0966922715306282,
      "learning_rate": 8.431176172250002e-05,
      "loss": 10.3792,
      "step": 20
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 0.11607522517442703,
      "learning_rate": 8.254476921464484e-05,
      "loss": 10.3756,
      "step": 21
    },
    {
      "epoch": 1.0292397660818713,
      "grad_norm": 0.14582864940166473,
      "learning_rate": 8.070706967926565e-05,
      "loss": 16.9191,
      "step": 22
    },
    {
      "epoch": 1.0760233918128654,
      "grad_norm": 0.09495727717876434,
      "learning_rate": 7.880338046471331e-05,
      "loss": 10.564,
      "step": 23
    },
    {
      "epoch": 1.1228070175438596,
      "grad_norm": 0.08627945184707642,
      "learning_rate": 7.683858831389867e-05,
      "loss": 9.984,
      "step": 24
    },
    {
      "epoch": 1.1695906432748537,
      "grad_norm": 0.09416957944631577,
      "learning_rate": 7.481773682009356e-05,
      "loss": 10.337,
      "step": 25
    },
    {
      "epoch": 1.1695906432748537,
      "eval_loss": 10.38400650024414,
      "eval_runtime": 0.0782,
      "eval_samples_per_second": 639.565,
      "eval_steps_per_second": 166.287,
      "step": 25
    },
    {
      "epoch": 1.2163742690058479,
      "grad_norm": 0.11057508736848831,
      "learning_rate": 7.274601348009935e-05,
      "loss": 10.1665,
      "step": 26
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 0.11977867782115936,
      "learning_rate": 7.062873637801692e-05,
      "loss": 10.8457,
      "step": 27
    },
    {
      "epoch": 1.3099415204678362,
      "grad_norm": 0.09831052273511887,
      "learning_rate": 6.847134053380112e-05,
      "loss": 10.5002,
      "step": 28
    },
    {
      "epoch": 1.3567251461988303,
      "grad_norm": 0.09686669707298279,
      "learning_rate": 6.627936395164243e-05,
      "loss": 10.2253,
      "step": 29
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 0.09972165524959564,
      "learning_rate": 6.40584334039897e-05,
      "loss": 10.1396,
      "step": 30
    },
    {
      "epoch": 1.4502923976608186,
      "grad_norm": 0.10765210539102554,
      "learning_rate": 6.181424998770595e-05,
      "loss": 9.7169,
      "step": 31
    },
    {
      "epoch": 1.4970760233918128,
      "grad_norm": 0.12939834594726562,
      "learning_rate": 5.955257448943445e-05,
      "loss": 10.9171,
      "step": 32
    },
    {
      "epoch": 1.543859649122807,
      "grad_norm": 0.12637734413146973,
      "learning_rate": 5.727921259774208e-05,
      "loss": 10.602,
      "step": 33
    },
    {
      "epoch": 1.590643274853801,
      "grad_norm": 0.1105811670422554,
      "learning_rate": 5.500000000000001e-05,
      "loss": 10.6176,
      "step": 34
    },
    {
      "epoch": 1.6374269005847952,
      "grad_norm": 0.11131985485553741,
      "learning_rate": 5.2720787402257935e-05,
      "loss": 10.2836,
      "step": 35
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 0.11741997301578522,
      "learning_rate": 5.044742551056556e-05,
      "loss": 10.0027,
      "step": 36
    },
    {
      "epoch": 1.7309941520467835,
      "grad_norm": 0.14288607239723206,
      "learning_rate": 4.8185750012294065e-05,
      "loss": 10.4621,
      "step": 37
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.12223335355520248,
      "learning_rate": 4.594156659601029e-05,
      "loss": 10.6583,
      "step": 38
    },
    {
      "epoch": 1.8245614035087718,
      "grad_norm": 0.12245681881904602,
      "learning_rate": 4.372063604835758e-05,
      "loss": 9.8662,
      "step": 39
    },
    {
      "epoch": 1.871345029239766,
      "grad_norm": 0.12315724790096283,
      "learning_rate": 4.152865946619889e-05,
      "loss": 10.76,
      "step": 40
    },
    {
      "epoch": 1.9181286549707601,
      "grad_norm": 0.12260238081216812,
      "learning_rate": 3.93712636219831e-05,
      "loss": 10.3994,
      "step": 41
    },
    {
      "epoch": 1.9649122807017543,
      "grad_norm": 0.15077726542949677,
      "learning_rate": 3.725398651990067e-05,
      "loss": 11.4694,
      "step": 42
    },
    {
      "epoch": 2.0116959064327484,
      "grad_norm": 0.19197246432304382,
      "learning_rate": 3.518226317990646e-05,
      "loss": 15.752,
      "step": 43
    },
    {
      "epoch": 2.0584795321637426,
      "grad_norm": 0.12943069636821747,
      "learning_rate": 3.3161411686101364e-05,
      "loss": 10.6442,
      "step": 44
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 0.11681056767702103,
      "learning_rate": 3.119661953528671e-05,
      "loss": 10.0928,
      "step": 45
    },
    {
      "epoch": 2.152046783625731,
      "grad_norm": 0.12141174077987671,
      "learning_rate": 2.9292930320734335e-05,
      "loss": 10.3879,
      "step": 46
    },
    {
      "epoch": 2.198830409356725,
      "grad_norm": 0.12950986623764038,
      "learning_rate": 2.745523078535517e-05,
      "loss": 10.1063,
      "step": 47
    },
    {
      "epoch": 2.245614035087719,
      "grad_norm": 0.14073993265628815,
      "learning_rate": 2.568823827750001e-05,
      "loss": 9.7249,
      "step": 48
    },
    {
      "epoch": 2.2923976608187133,
      "grad_norm": 0.14041155576705933,
      "learning_rate": 2.39964886415941e-05,
      "loss": 11.2327,
      "step": 49
    },
    {
      "epoch": 2.3391812865497075,
      "grad_norm": 0.14114905893802643,
      "learning_rate": 2.2384324574689612e-05,
      "loss": 10.3466,
      "step": 50
    },
    {
      "epoch": 2.3391812865497075,
      "eval_loss": 10.37548828125,
      "eval_runtime": 0.0757,
      "eval_samples_per_second": 660.475,
      "eval_steps_per_second": 171.723,
      "step": 50
    },
    {
      "epoch": 2.3859649122807016,
      "grad_norm": 0.1374143660068512,
      "learning_rate": 2.0855884478824412e-05,
      "loss": 10.6913,
      "step": 51
    },
    {
      "epoch": 2.4327485380116958,
      "grad_norm": 0.13993822038173676,
      "learning_rate": 1.9415091837803573e-05,
      "loss": 9.6339,
      "step": 52
    },
    {
      "epoch": 2.47953216374269,
      "grad_norm": 0.1723541021347046,
      "learning_rate": 1.806564514567258e-05,
      "loss": 11.5109,
      "step": 53
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 0.13717088103294373,
      "learning_rate": 1.6811008412736208e-05,
      "loss": 9.4536,
      "step": 54
    },
    {
      "epoch": 2.573099415204678,
      "grad_norm": 0.13807837665081024,
      "learning_rate": 1.5654402273493805e-05,
      "loss": 10.6052,
      "step": 55
    },
    {
      "epoch": 2.6198830409356724,
      "grad_norm": 0.13864926993846893,
      "learning_rate": 1.459879571931663e-05,
      "loss": 10.2006,
      "step": 56
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.14294551312923431,
      "learning_rate": 1.3646898477089626e-05,
      "loss": 10.4874,
      "step": 57
    },
    {
      "epoch": 2.7134502923976607,
      "grad_norm": 0.16176921129226685,
      "learning_rate": 1.2801154053381386e-05,
      "loss": 11.0147,
      "step": 58
    },
    {
      "epoch": 2.760233918128655,
      "grad_norm": 0.13276654481887817,
      "learning_rate": 1.2063733461997805e-05,
      "loss": 9.6744,
      "step": 59
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 0.14666683971881866,
      "learning_rate": 1.1436529651020813e-05,
      "loss": 10.4008,
      "step": 60
    },
    {
      "epoch": 2.853801169590643,
      "grad_norm": 0.1457078456878662,
      "learning_rate": 1.092115264363775e-05,
      "loss": 10.5984,
      "step": 61
    },
    {
      "epoch": 2.9005847953216373,
      "grad_norm": 0.13246245682239532,
      "learning_rate": 1.0518925405234989e-05,
      "loss": 9.8514,
      "step": 62
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 0.14585375785827637,
      "learning_rate": 1.023088044736472e-05,
      "loss": 10.7322,
      "step": 63
    },
    {
      "epoch": 2.9941520467836256,
      "grad_norm": 0.1970202624797821,
      "learning_rate": 1.0057757177302627e-05,
      "loss": 14.7962,
      "step": 64
    },
    {
      "epoch": 3.0409356725146197,
      "grad_norm": 0.18727658689022064,
      "learning_rate": 1e-05,
      "loss": 12.5777,
      "step": 65
    }
  ],
  "logging_steps": 1,
  "max_steps": 65,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 55598477475840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}