|
{
  "best_metric": 0.6621928215026855,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03842459173871278,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007684918347742555,
      "grad_norm": 1.1188583374023438,
      "learning_rate": 0.0001,
      "loss": 1.0891,
      "step": 1
    },
    {
      "epoch": 0.0007684918347742555,
      "eval_loss": 3.402336597442627,
      "eval_runtime": 191.6418,
      "eval_samples_per_second": 2.86,
      "eval_steps_per_second": 1.43,
      "step": 1
    },
    {
      "epoch": 0.001536983669548511,
      "grad_norm": 1.6677217483520508,
      "learning_rate": 0.0002,
      "loss": 1.4543,
      "step": 2
    },
    {
      "epoch": 0.0023054755043227667,
      "grad_norm": 1.7429873943328857,
      "learning_rate": 0.00019978589232386035,
      "loss": 1.5349,
      "step": 3
    },
    {
      "epoch": 0.003073967339097022,
      "grad_norm": 1.5597237348556519,
      "learning_rate": 0.00019914448613738106,
      "loss": 1.2103,
      "step": 4
    },
    {
      "epoch": 0.0038424591738712775,
      "grad_norm": 2.7055306434631348,
      "learning_rate": 0.00019807852804032305,
      "loss": 1.072,
      "step": 5
    },
    {
      "epoch": 0.004610951008645533,
      "grad_norm": 2.731037139892578,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.0839,
      "step": 6
    },
    {
      "epoch": 0.005379442843419789,
      "grad_norm": 1.686295986175537,
      "learning_rate": 0.0001946930129495106,
      "loss": 0.8141,
      "step": 7
    },
    {
      "epoch": 0.006147934678194044,
      "grad_norm": 1.4061486721038818,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.6588,
      "step": 8
    },
    {
      "epoch": 0.0069164265129683,
      "grad_norm": 1.53168523311615,
      "learning_rate": 0.00018968727415326884,
      "loss": 0.73,
      "step": 9
    },
    {
      "epoch": 0.007684918347742555,
      "grad_norm": 1.0826469659805298,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.5536,
      "step": 10
    },
    {
      "epoch": 0.00845341018251681,
      "grad_norm": 1.28086256980896,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.5734,
      "step": 11
    },
    {
      "epoch": 0.009221902017291067,
      "grad_norm": 1.5090341567993164,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.4795,
      "step": 12
    },
    {
      "epoch": 0.009990393852065321,
      "grad_norm": 1.4438533782958984,
      "learning_rate": 0.00017518398074789775,
      "loss": 0.5919,
      "step": 13
    },
    {
      "epoch": 0.010758885686839578,
      "grad_norm": 1.4994761943817139,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.4987,
      "step": 14
    },
    {
      "epoch": 0.011527377521613832,
      "grad_norm": 1.3660697937011719,
      "learning_rate": 0.00016593458151000688,
      "loss": 0.6833,
      "step": 15
    },
    {
      "epoch": 0.012295869356388088,
      "grad_norm": 1.2865175008773804,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.6312,
      "step": 16
    },
    {
      "epoch": 0.013064361191162345,
      "grad_norm": 1.6526931524276733,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.5865,
      "step": 17
    },
    {
      "epoch": 0.0138328530259366,
      "grad_norm": 1.6482139825820923,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.6131,
      "step": 18
    },
    {
      "epoch": 0.014601344860710855,
      "grad_norm": 1.7616355419158936,
      "learning_rate": 0.00014422886902190014,
      "loss": 0.6195,
      "step": 19
    },
    {
      "epoch": 0.01536983669548511,
      "grad_norm": 1.3635176420211792,
      "learning_rate": 0.000138268343236509,
      "loss": 0.5372,
      "step": 20
    },
    {
      "epoch": 0.016138328530259365,
      "grad_norm": 1.7781474590301514,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.8488,
      "step": 21
    },
    {
      "epoch": 0.01690682036503362,
      "grad_norm": 1.7835301160812378,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.6644,
      "step": 22
    },
    {
      "epoch": 0.017675312199807877,
      "grad_norm": 1.841204047203064,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.6199,
      "step": 23
    },
    {
      "epoch": 0.018443804034582133,
      "grad_norm": 2.19480562210083,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.8213,
      "step": 24
    },
    {
      "epoch": 0.01921229586935639,
      "grad_norm": 3.188343048095703,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.5297,
      "step": 25
    },
    {
      "epoch": 0.01921229586935639,
      "eval_loss": 0.6813758015632629,
      "eval_runtime": 193.0897,
      "eval_samples_per_second": 2.838,
      "eval_steps_per_second": 1.419,
      "step": 25
    },
    {
      "epoch": 0.019980787704130643,
      "grad_norm": 2.3865928649902344,
      "learning_rate": 0.0001,
      "loss": 0.5742,
      "step": 26
    },
    {
      "epoch": 0.0207492795389049,
      "grad_norm": 2.167008399963379,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.7711,
      "step": 27
    },
    {
      "epoch": 0.021517771373679155,
      "grad_norm": 2.8643157482147217,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.7881,
      "step": 28
    },
    {
      "epoch": 0.02228626320845341,
      "grad_norm": 2.675781011581421,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.8584,
      "step": 29
    },
    {
      "epoch": 0.023054755043227664,
      "grad_norm": 5.553785800933838,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.7431,
      "step": 30
    },
    {
      "epoch": 0.02382324687800192,
      "grad_norm": 2.85943341255188,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.8504,
      "step": 31
    },
    {
      "epoch": 0.024591738712776177,
      "grad_norm": 2.1145620346069336,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.4937,
      "step": 32
    },
    {
      "epoch": 0.025360230547550433,
      "grad_norm": 2.0370304584503174,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.8028,
      "step": 33
    },
    {
      "epoch": 0.02612872238232469,
      "grad_norm": 2.053551197052002,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.7418,
      "step": 34
    },
    {
      "epoch": 0.026897214217098942,
      "grad_norm": 1.7939903736114502,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.5822,
      "step": 35
    },
    {
      "epoch": 0.0276657060518732,
      "grad_norm": 2.718136787414551,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.8955,
      "step": 36
    },
    {
      "epoch": 0.028434197886647455,
      "grad_norm": 2.0340025424957275,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.49,
      "step": 37
    },
    {
      "epoch": 0.02920268972142171,
      "grad_norm": 2.521495819091797,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.7506,
      "step": 38
    },
    {
      "epoch": 0.029971181556195964,
      "grad_norm": 1.7781364917755127,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.5979,
      "step": 39
    },
    {
      "epoch": 0.03073967339097022,
      "grad_norm": 2.1692018508911133,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.654,
      "step": 40
    },
    {
      "epoch": 0.031508165225744476,
      "grad_norm": 3.2491769790649414,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.8937,
      "step": 41
    },
    {
      "epoch": 0.03227665706051873,
      "grad_norm": 2.3188233375549316,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.7068,
      "step": 42
    },
    {
      "epoch": 0.03304514889529299,
      "grad_norm": 3.3548412322998047,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.9072,
      "step": 43
    },
    {
      "epoch": 0.03381364073006724,
      "grad_norm": 3.1296138763427734,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.9079,
      "step": 44
    },
    {
      "epoch": 0.0345821325648415,
      "grad_norm": 2.4776840209960938,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.5561,
      "step": 45
    },
    {
      "epoch": 0.035350624399615754,
      "grad_norm": 3.5687758922576904,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.7228,
      "step": 46
    },
    {
      "epoch": 0.03611911623439001,
      "grad_norm": 5.37619686126709,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.8427,
      "step": 47
    },
    {
      "epoch": 0.03688760806916427,
      "grad_norm": 5.639602184295654,
      "learning_rate": 8.555138626189618e-07,
      "loss": 1.1088,
      "step": 48
    },
    {
      "epoch": 0.03765609990393852,
      "grad_norm": 3.618199110031128,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.8533,
      "step": 49
    },
    {
      "epoch": 0.03842459173871278,
      "grad_norm": 5.885342597961426,
      "learning_rate": 0.0,
      "loss": 0.5561,
      "step": 50
    },
    {
      "epoch": 0.03842459173871278,
      "eval_loss": 0.6621928215026855,
      "eval_runtime": 193.1089,
      "eval_samples_per_second": 2.838,
      "eval_steps_per_second": 1.419,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.41887283560448e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}