{
  "best_metric": 0.9669813513755798,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06415911460421846,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012831822920843692,
      "grad_norm": 3.5345535278320312,
      "learning_rate": 5e-05,
      "loss": 38.0642,
      "step": 1
    },
    {
      "epoch": 0.0012831822920843692,
      "eval_loss": 1.1701257228851318,
      "eval_runtime": 1.9057,
      "eval_samples_per_second": 26.237,
      "eval_steps_per_second": 6.822,
      "step": 1
    },
    {
      "epoch": 0.0025663645841687384,
      "grad_norm": 3.669259786605835,
      "learning_rate": 0.0001,
      "loss": 37.1715,
      "step": 2
    },
    {
      "epoch": 0.0038495468762531075,
      "grad_norm": 3.515411138534546,
      "learning_rate": 9.990365154573717e-05,
      "loss": 37.364,
      "step": 3
    },
    {
      "epoch": 0.005132729168337477,
      "grad_norm": 3.9850051403045654,
      "learning_rate": 9.961501876182148e-05,
      "loss": 36.108,
      "step": 4
    },
    {
      "epoch": 0.006415911460421846,
      "grad_norm": 4.171756267547607,
      "learning_rate": 9.913533761814537e-05,
      "loss": 35.0949,
      "step": 5
    },
    {
      "epoch": 0.007699093752506215,
      "grad_norm": 3.1119837760925293,
      "learning_rate": 9.846666218300807e-05,
      "loss": 33.8759,
      "step": 6
    },
    {
      "epoch": 0.008982276044590584,
      "grad_norm": 4.50632905960083,
      "learning_rate": 9.761185582727977e-05,
      "loss": 34.068,
      "step": 7
    },
    {
      "epoch": 0.010265458336674953,
      "grad_norm": 5.083990573883057,
      "learning_rate": 9.657457896300791e-05,
      "loss": 34.5506,
      "step": 8
    },
    {
      "epoch": 0.011548640628759323,
      "grad_norm": 5.132439613342285,
      "learning_rate": 9.535927336897098e-05,
      "loss": 33.2934,
      "step": 9
    },
    {
      "epoch": 0.012831822920843693,
      "grad_norm": 3.916484832763672,
      "learning_rate": 9.397114317029975e-05,
      "loss": 32.4104,
      "step": 10
    },
    {
      "epoch": 0.014115005212928062,
      "grad_norm": 2.4255878925323486,
      "learning_rate": 9.241613255361455e-05,
      "loss": 32.3686,
      "step": 11
    },
    {
      "epoch": 0.01539818750501243,
      "grad_norm": 2.369929552078247,
      "learning_rate": 9.070090031310558e-05,
      "loss": 31.8053,
      "step": 12
    },
    {
      "epoch": 0.0166813697970968,
      "grad_norm": 2.6247308254241943,
      "learning_rate": 8.883279133655399e-05,
      "loss": 32.3517,
      "step": 13
    },
    {
      "epoch": 0.017964552089181168,
      "grad_norm": 3.2126903533935547,
      "learning_rate": 8.681980515339464e-05,
      "loss": 33.238,
      "step": 14
    },
    {
      "epoch": 0.01924773438126554,
      "grad_norm": 2.6321258544921875,
      "learning_rate": 8.467056167950311e-05,
      "loss": 33.4018,
      "step": 15
    },
    {
      "epoch": 0.020530916673349907,
      "grad_norm": 2.1531386375427246,
      "learning_rate": 8.239426430539243e-05,
      "loss": 32.5685,
      "step": 16
    },
    {
      "epoch": 0.021814098965434278,
      "grad_norm": 1.888481855392456,
      "learning_rate": 8.000066048588211e-05,
      "loss": 31.1734,
      "step": 17
    },
    {
      "epoch": 0.023097281257518646,
      "grad_norm": 1.7277491092681885,
      "learning_rate": 7.75e-05,
      "loss": 31.66,
      "step": 18
    },
    {
      "epoch": 0.024380463549603014,
      "grad_norm": 1.7912100553512573,
      "learning_rate": 7.490299105985507e-05,
      "loss": 31.0256,
      "step": 19
    },
    {
      "epoch": 0.025663645841687385,
      "grad_norm": 1.8502416610717773,
      "learning_rate": 7.222075445642904e-05,
      "loss": 30.776,
      "step": 20
    },
    {
      "epoch": 0.026946828133771753,
      "grad_norm": 2.072106122970581,
      "learning_rate": 6.946477593864228e-05,
      "loss": 30.05,
      "step": 21
    },
    {
      "epoch": 0.028230010425856124,
      "grad_norm": 1.9093363285064697,
      "learning_rate": 6.664685702961344e-05,
      "loss": 29.555,
      "step": 22
    },
    {
      "epoch": 0.029513192717940492,
      "grad_norm": 2.0280885696411133,
      "learning_rate": 6.377906449072578e-05,
      "loss": 29.988,
      "step": 23
    },
    {
      "epoch": 0.03079637501002486,
      "grad_norm": 2.303346872329712,
      "learning_rate": 6.087367864990233e-05,
      "loss": 32.021,
      "step": 24
    },
    {
      "epoch": 0.03207955730210923,
      "grad_norm": 2.2593610286712646,
      "learning_rate": 5.794314081535644e-05,
      "loss": 29.1695,
      "step": 25
    },
    {
      "epoch": 0.03207955730210923,
      "eval_loss": 0.989615261554718,
      "eval_runtime": 1.831,
      "eval_samples_per_second": 27.308,
      "eval_steps_per_second": 7.1,
      "step": 25
    },
    {
      "epoch": 0.0333627395941936,
      "grad_norm": 2.4259300231933594,
      "learning_rate": 5.500000000000001e-05,
      "loss": 33.1251,
      "step": 26
    },
    {
      "epoch": 0.03464592188627797,
      "grad_norm": 1.9009075164794922,
      "learning_rate": 5.205685918464356e-05,
      "loss": 31.923,
      "step": 27
    },
    {
      "epoch": 0.035929104178362335,
      "grad_norm": 1.9029998779296875,
      "learning_rate": 4.912632135009769e-05,
      "loss": 30.9763,
      "step": 28
    },
    {
      "epoch": 0.03721228647044671,
      "grad_norm": 1.6665700674057007,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 29.9786,
      "step": 29
    },
    {
      "epoch": 0.03849546876253108,
      "grad_norm": 1.5933430194854736,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 30.5074,
      "step": 30
    },
    {
      "epoch": 0.03977865105461545,
      "grad_norm": 1.5921006202697754,
      "learning_rate": 4.053522406135775e-05,
      "loss": 30.9787,
      "step": 31
    },
    {
      "epoch": 0.041061833346699814,
      "grad_norm": 1.6825135946273804,
      "learning_rate": 3.777924554357096e-05,
      "loss": 29.4713,
      "step": 32
    },
    {
      "epoch": 0.042345015638784185,
      "grad_norm": 1.7640290260314941,
      "learning_rate": 3.509700894014496e-05,
      "loss": 29.3274,
      "step": 33
    },
    {
      "epoch": 0.043628197930868556,
      "grad_norm": 1.6936829090118408,
      "learning_rate": 3.250000000000001e-05,
      "loss": 29.7702,
      "step": 34
    },
    {
      "epoch": 0.04491138022295292,
      "grad_norm": 1.7560760974884033,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 30.3689,
      "step": 35
    },
    {
      "epoch": 0.04619456251503729,
      "grad_norm": 1.730417251586914,
      "learning_rate": 2.760573569460757e-05,
      "loss": 29.5927,
      "step": 36
    },
    {
      "epoch": 0.04747774480712166,
      "grad_norm": 1.891713261604309,
      "learning_rate": 2.53294383204969e-05,
      "loss": 30.0994,
      "step": 37
    },
    {
      "epoch": 0.04876092709920603,
      "grad_norm": 1.935252070426941,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 30.6733,
      "step": 38
    },
    {
      "epoch": 0.0500441093912904,
      "grad_norm": 1.701902985572815,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 31.3318,
      "step": 39
    },
    {
      "epoch": 0.05132729168337477,
      "grad_norm": 1.5853103399276733,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 30.9148,
      "step": 40
    },
    {
      "epoch": 0.05261047397545914,
      "grad_norm": 1.5667396783828735,
      "learning_rate": 1.758386744638546e-05,
      "loss": 30.8344,
      "step": 41
    },
    {
      "epoch": 0.053893656267543506,
      "grad_norm": 1.5070350170135498,
      "learning_rate": 1.602885682970026e-05,
      "loss": 30.1059,
      "step": 42
    },
    {
      "epoch": 0.05517683855962788,
      "grad_norm": 1.4857155084609985,
      "learning_rate": 1.464072663102903e-05,
      "loss": 30.039,
      "step": 43
    },
    {
      "epoch": 0.05646002085171225,
      "grad_norm": 1.5021370649337769,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 30.441,
      "step": 44
    },
    {
      "epoch": 0.05774320314379661,
      "grad_norm": 1.5308558940887451,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 28.8641,
      "step": 45
    },
    {
      "epoch": 0.059026385435880985,
      "grad_norm": 1.572772741317749,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 30.0764,
      "step": 46
    },
    {
      "epoch": 0.060309567727965356,
      "grad_norm": 1.5550898313522339,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 30.2535,
      "step": 47
    },
    {
      "epoch": 0.06159275002004972,
      "grad_norm": 1.7037779092788696,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 29.5456,
      "step": 48
    },
    {
      "epoch": 0.06287593231213409,
      "grad_norm": 1.7626756429672241,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 30.1327,
      "step": 49
    },
    {
      "epoch": 0.06415911460421846,
      "grad_norm": 1.985012412071228,
      "learning_rate": 1e-05,
      "loss": 30.0777,
      "step": 50
    },
    {
      "epoch": 0.06415911460421846,
      "eval_loss": 0.9669813513755798,
      "eval_runtime": 1.9069,
      "eval_samples_per_second": 26.221,
      "eval_steps_per_second": 6.817,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.694680215595254e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}