Training in progress, step 34, checkpoint
bf169dd verified
{
"best_metric": 0.36255866289138794,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.073446327683616,
"eval_steps": 25,
"global_step": 34,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0903954802259887,
"grad_norm": 40.97779083251953,
"learning_rate": 5e-05,
"loss": 8.227,
"step": 1
},
{
"epoch": 0.0903954802259887,
"eval_loss": 8.391592025756836,
"eval_runtime": 1.4447,
"eval_samples_per_second": 34.61,
"eval_steps_per_second": 8.999,
"step": 1
},
{
"epoch": 0.1807909604519774,
"grad_norm": 40.57859802246094,
"learning_rate": 0.0001,
"loss": 8.3493,
"step": 2
},
{
"epoch": 0.2711864406779661,
"grad_norm": 37.76644515991211,
"learning_rate": 9.978331270024886e-05,
"loss": 6.2252,
"step": 3
},
{
"epoch": 0.3615819209039548,
"grad_norm": 27.821704864501953,
"learning_rate": 9.913533761814537e-05,
"loss": 2.5646,
"step": 4
},
{
"epoch": 0.4519774011299435,
"grad_norm": 23.382740020751953,
"learning_rate": 9.80623151079494e-05,
"loss": 1.332,
"step": 5
},
{
"epoch": 0.5423728813559322,
"grad_norm": 6.967331409454346,
"learning_rate": 9.657457896300791e-05,
"loss": 0.6393,
"step": 6
},
{
"epoch": 0.632768361581921,
"grad_norm": 5.6305365562438965,
"learning_rate": 9.468645689567598e-05,
"loss": 0.4871,
"step": 7
},
{
"epoch": 0.7231638418079096,
"grad_norm": 5.748826503753662,
"learning_rate": 9.241613255361455e-05,
"loss": 0.4325,
"step": 8
},
{
"epoch": 0.8135593220338984,
"grad_norm": 1.4002121686935425,
"learning_rate": 8.978547040132317e-05,
"loss": 0.3653,
"step": 9
},
{
"epoch": 0.903954802259887,
"grad_norm": 1.6485339403152466,
"learning_rate": 8.681980515339464e-05,
"loss": 0.3518,
"step": 10
},
{
"epoch": 0.9943502824858758,
"grad_norm": 4.030787467956543,
"learning_rate": 8.354769778736406e-05,
"loss": 0.3826,
"step": 11
},
{
"epoch": 1.0847457627118644,
"grad_norm": 4.237292766571045,
"learning_rate": 8.000066048588211e-05,
"loss": 0.694,
"step": 12
},
{
"epoch": 1.1751412429378532,
"grad_norm": 2.3578829765319824,
"learning_rate": 7.62128531571699e-05,
"loss": 0.3633,
"step": 13
},
{
"epoch": 1.2655367231638417,
"grad_norm": 1.2775280475616455,
"learning_rate": 7.222075445642904e-05,
"loss": 0.3493,
"step": 14
},
{
"epoch": 1.3559322033898304,
"grad_norm": 5.237298011779785,
"learning_rate": 6.80628104764508e-05,
"loss": 0.3867,
"step": 15
},
{
"epoch": 1.4463276836158192,
"grad_norm": 1.780826210975647,
"learning_rate": 6.377906449072578e-05,
"loss": 0.3456,
"step": 16
},
{
"epoch": 1.536723163841808,
"grad_norm": 0.4675554633140564,
"learning_rate": 5.941077131483025e-05,
"loss": 0.3453,
"step": 17
},
{
"epoch": 1.6271186440677967,
"grad_norm": 1.3954887390136719,
"learning_rate": 5.500000000000001e-05,
"loss": 0.3559,
"step": 18
},
{
"epoch": 1.7175141242937855,
"grad_norm": 0.9117836952209473,
"learning_rate": 5.058922868516978e-05,
"loss": 0.3576,
"step": 19
},
{
"epoch": 1.807909604519774,
"grad_norm": 1.012261986732483,
"learning_rate": 4.6220935509274235e-05,
"loss": 0.3659,
"step": 20
},
{
"epoch": 1.8983050847457628,
"grad_norm": 2.663132429122925,
"learning_rate": 4.19371895235492e-05,
"loss": 0.358,
"step": 21
},
{
"epoch": 1.9887005649717513,
"grad_norm": 1.1675078868865967,
"learning_rate": 3.777924554357096e-05,
"loss": 0.3509,
"step": 22
},
{
"epoch": 2.07909604519774,
"grad_norm": 3.068908214569092,
"learning_rate": 3.378714684283011e-05,
"loss": 0.672,
"step": 23
},
{
"epoch": 2.169491525423729,
"grad_norm": 0.39603307843208313,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.3361,
"step": 24
},
{
"epoch": 2.2598870056497176,
"grad_norm": 0.6155238151550293,
"learning_rate": 2.645230221263596e-05,
"loss": 0.3329,
"step": 25
},
{
"epoch": 2.2598870056497176,
"eval_loss": 0.36255866289138794,
"eval_runtime": 1.4631,
"eval_samples_per_second": 34.174,
"eval_steps_per_second": 8.885,
"step": 25
},
{
"epoch": 2.3502824858757063,
"grad_norm": 1.7378867864608765,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.3541,
"step": 26
},
{
"epoch": 2.440677966101695,
"grad_norm": 1.1388368606567383,
"learning_rate": 2.0214529598676836e-05,
"loss": 0.3549,
"step": 27
},
{
"epoch": 2.5310734463276834,
"grad_norm": 2.1477389335632324,
"learning_rate": 1.758386744638546e-05,
"loss": 0.3571,
"step": 28
},
{
"epoch": 2.621468926553672,
"grad_norm": 0.21045787632465363,
"learning_rate": 1.531354310432403e-05,
"loss": 0.3426,
"step": 29
},
{
"epoch": 2.711864406779661,
"grad_norm": 0.26963287591934204,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.3498,
"step": 30
},
{
"epoch": 2.8022598870056497,
"grad_norm": 0.897307813167572,
"learning_rate": 1.1937684892050604e-05,
"loss": 0.3472,
"step": 31
},
{
"epoch": 2.8926553672316384,
"grad_norm": 0.2607298791408539,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.3494,
"step": 32
},
{
"epoch": 2.983050847457627,
"grad_norm": 0.660701334476471,
"learning_rate": 1.0216687299751144e-05,
"loss": 0.3465,
"step": 33
},
{
"epoch": 3.073446327683616,
"grad_norm": 1.1560474634170532,
"learning_rate": 1e-05,
"loss": 0.6403,
"step": 34
}
],
"logging_steps": 1,
"max_steps": 34,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0808353705112371e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
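
For reference, here is a minimal sketch of how this trainer_state.json could be inspected with plain Python. The file path used below is an assumption inferred from the "miner_id_24/checkpoint-..." naming in "best_model_checkpoint"; it is not shown above, and nothing else in the sketch goes beyond the fields present in the file.

```python
import json

# Assumed location of the state file for this step-34 checkpoint (hypothetical path,
# based on the "miner_id_24/checkpoint-25" pattern seen in best_model_checkpoint).
with open("miner_id_24/checkpoint-34/trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"])             # 0.36255866289138794
print("best checkpoint:", state["best_model_checkpoint"])  # miner_id_24/checkpoint-25

# log_history mixes per-step training entries (with "loss") and evaluation
# entries (with "eval_loss"); split them to look at each separately.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}")
```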