{
"best_metric": 0.4066352844238281,
"best_model_checkpoint": "mikhail-panzo/fil_b32_le4_s4000/checkpoint-3500",
"epoch": 88.88888888888889,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.1111111111111112,
"grad_norm": 1.5478250980377197,
"learning_rate": 2.5e-06,
"loss": 0.7933,
"step": 50
},
{
"epoch": 2.2222222222222223,
"grad_norm": 2.5992791652679443,
"learning_rate": 5e-06,
"loss": 0.7126,
"step": 100
},
{
"epoch": 3.3333333333333335,
"grad_norm": 4.512153625488281,
"learning_rate": 7.45e-06,
"loss": 0.6643,
"step": 150
},
{
"epoch": 4.444444444444445,
"grad_norm": 5.529618740081787,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6161,
"step": 200
},
{
"epoch": 5.555555555555555,
"grad_norm": 1.547269344329834,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.5572,
"step": 250
},
{
"epoch": 6.666666666666667,
"grad_norm": 1.7310105562210083,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5307,
"step": 300
},
{
"epoch": 7.777777777777778,
"grad_norm": 4.040494441986084,
"learning_rate": 1.745e-05,
"loss": 0.5259,
"step": 350
},
{
"epoch": 8.88888888888889,
"grad_norm": 1.6819641590118408,
"learning_rate": 1.995e-05,
"loss": 0.4981,
"step": 400
},
{
"epoch": 10.0,
"grad_norm": 3.428649663925171,
"learning_rate": 2.245e-05,
"loss": 0.5008,
"step": 450
},
{
"epoch": 11.11111111111111,
"grad_norm": 2.2339069843292236,
"learning_rate": 2.495e-05,
"loss": 0.4923,
"step": 500
},
{
"epoch": 11.11111111111111,
"eval_loss": 0.445181667804718,
"eval_runtime": 8.1426,
"eval_samples_per_second": 19.527,
"eval_steps_per_second": 2.456,
"step": 500
},
{
"epoch": 12.222222222222221,
"grad_norm": 3.6943907737731934,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.4999,
"step": 550
},
{
"epoch": 13.333333333333334,
"grad_norm": 2.8963587284088135,
"learning_rate": 2.995e-05,
"loss": 0.4723,
"step": 600
},
{
"epoch": 14.444444444444445,
"grad_norm": 2.157615900039673,
"learning_rate": 3.245e-05,
"loss": 0.4771,
"step": 650
},
{
"epoch": 15.555555555555555,
"grad_norm": 4.2828450202941895,
"learning_rate": 3.495e-05,
"loss": 0.4656,
"step": 700
},
{
"epoch": 16.666666666666668,
"grad_norm": 2.30914044380188,
"learning_rate": 3.745e-05,
"loss": 0.4681,
"step": 750
},
{
"epoch": 17.77777777777778,
"grad_norm": 1.7412217855453491,
"learning_rate": 3.995e-05,
"loss": 0.4639,
"step": 800
},
{
"epoch": 18.88888888888889,
"grad_norm": 2.457113742828369,
"learning_rate": 4.245e-05,
"loss": 0.4691,
"step": 850
},
{
"epoch": 20.0,
"grad_norm": 2.960203170776367,
"learning_rate": 4.495e-05,
"loss": 0.4569,
"step": 900
},
{
"epoch": 21.11111111111111,
"grad_norm": 3.9787750244140625,
"learning_rate": 4.745e-05,
"loss": 0.4547,
"step": 950
},
{
"epoch": 22.22222222222222,
"grad_norm": 1.6046252250671387,
"learning_rate": 4.995e-05,
"loss": 0.4488,
"step": 1000
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.42208045721054077,
"eval_runtime": 7.5792,
"eval_samples_per_second": 20.978,
"eval_steps_per_second": 2.639,
"step": 1000
},
{
"epoch": 23.333333333333332,
"grad_norm": 2.2585225105285645,
"learning_rate": 5.245e-05,
"loss": 0.4537,
"step": 1050
},
{
"epoch": 24.444444444444443,
"grad_norm": 2.7472567558288574,
"learning_rate": 5.495e-05,
"loss": 0.4438,
"step": 1100
},
{
"epoch": 25.555555555555557,
"grad_norm": 2.1399290561676025,
"learning_rate": 5.745e-05,
"loss": 0.4474,
"step": 1150
},
{
"epoch": 26.666666666666668,
"grad_norm": 2.671912670135498,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4488,
"step": 1200
},
{
"epoch": 27.77777777777778,
"grad_norm": 4.835938453674316,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4472,
"step": 1250
},
{
"epoch": 28.88888888888889,
"grad_norm": 5.086827754974365,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4526,
"step": 1300
},
{
"epoch": 30.0,
"grad_norm": 2.293753147125244,
"learning_rate": 6.745e-05,
"loss": 0.4448,
"step": 1350
},
{
"epoch": 31.11111111111111,
"grad_norm": 3.609741449356079,
"learning_rate": 6.995e-05,
"loss": 0.438,
"step": 1400
},
{
"epoch": 32.22222222222222,
"grad_norm": 1.580810546875,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4446,
"step": 1450
},
{
"epoch": 33.333333333333336,
"grad_norm": 2.7843821048736572,
"learning_rate": 7.495e-05,
"loss": 0.4413,
"step": 1500
},
{
"epoch": 33.333333333333336,
"eval_loss": 0.41924577951431274,
"eval_runtime": 7.8331,
"eval_samples_per_second": 20.298,
"eval_steps_per_second": 2.553,
"step": 1500
},
{
"epoch": 34.44444444444444,
"grad_norm": 5.7151875495910645,
"learning_rate": 7.745e-05,
"loss": 0.4369,
"step": 1550
},
{
"epoch": 35.55555555555556,
"grad_norm": 2.420909881591797,
"learning_rate": 7.995e-05,
"loss": 0.4491,
"step": 1600
},
{
"epoch": 36.666666666666664,
"grad_norm": 3.388465642929077,
"learning_rate": 8.245e-05,
"loss": 0.4381,
"step": 1650
},
{
"epoch": 37.77777777777778,
"grad_norm": 1.8416664600372314,
"learning_rate": 8.495e-05,
"loss": 0.4332,
"step": 1700
},
{
"epoch": 38.888888888888886,
"grad_norm": 4.991604328155518,
"learning_rate": 8.745000000000001e-05,
"loss": 0.4363,
"step": 1750
},
{
"epoch": 40.0,
"grad_norm": 1.6002848148345947,
"learning_rate": 8.995e-05,
"loss": 0.4296,
"step": 1800
},
{
"epoch": 41.111111111111114,
"grad_norm": 2.4206886291503906,
"learning_rate": 9.245e-05,
"loss": 0.4273,
"step": 1850
},
{
"epoch": 42.22222222222222,
"grad_norm": 3.848668098449707,
"learning_rate": 9.495e-05,
"loss": 0.4244,
"step": 1900
},
{
"epoch": 43.333333333333336,
"grad_norm": 3.4843313694000244,
"learning_rate": 9.745000000000001e-05,
"loss": 0.4213,
"step": 1950
},
{
"epoch": 44.44444444444444,
"grad_norm": 7.2384772300720215,
"learning_rate": 9.995e-05,
"loss": 0.4318,
"step": 2000
},
{
"epoch": 44.44444444444444,
"eval_loss": 0.42309698462486267,
"eval_runtime": 7.2889,
"eval_samples_per_second": 21.814,
"eval_steps_per_second": 2.744,
"step": 2000
},
{
"epoch": 45.55555555555556,
"grad_norm": 1.936278223991394,
"learning_rate": 9.755000000000001e-05,
"loss": 0.4279,
"step": 2050
},
{
"epoch": 46.666666666666664,
"grad_norm": 6.694615840911865,
"learning_rate": 9.505e-05,
"loss": 0.4326,
"step": 2100
},
{
"epoch": 47.77777777777778,
"grad_norm": 3.927391529083252,
"learning_rate": 9.260000000000001e-05,
"loss": 0.434,
"step": 2150
},
{
"epoch": 48.888888888888886,
"grad_norm": 2.543116569519043,
"learning_rate": 9.010000000000001e-05,
"loss": 0.4338,
"step": 2200
},
{
"epoch": 50.0,
"grad_norm": 3.7880661487579346,
"learning_rate": 8.76e-05,
"loss": 0.4239,
"step": 2250
},
{
"epoch": 51.111111111111114,
"grad_norm": 6.448720455169678,
"learning_rate": 8.510000000000001e-05,
"loss": 0.423,
"step": 2300
},
{
"epoch": 52.22222222222222,
"grad_norm": 2.7751946449279785,
"learning_rate": 8.26e-05,
"loss": 0.4182,
"step": 2350
},
{
"epoch": 53.333333333333336,
"grad_norm": 3.261136054992676,
"learning_rate": 8.010000000000001e-05,
"loss": 0.4133,
"step": 2400
},
{
"epoch": 54.44444444444444,
"grad_norm": 3.6887683868408203,
"learning_rate": 7.76e-05,
"loss": 0.4096,
"step": 2450
},
{
"epoch": 55.55555555555556,
"grad_norm": 2.9640533924102783,
"learning_rate": 7.510000000000001e-05,
"loss": 0.4155,
"step": 2500
},
{
"epoch": 55.55555555555556,
"eval_loss": 0.41808074712753296,
"eval_runtime": 7.7845,
"eval_samples_per_second": 20.425,
"eval_steps_per_second": 2.569,
"step": 2500
},
{
"epoch": 56.666666666666664,
"grad_norm": 2.4800713062286377,
"learning_rate": 7.26e-05,
"loss": 0.4101,
"step": 2550
},
{
"epoch": 57.77777777777778,
"grad_norm": 4.109247207641602,
"learning_rate": 7.01e-05,
"loss": 0.4087,
"step": 2600
},
{
"epoch": 58.888888888888886,
"grad_norm": 2.4882519245147705,
"learning_rate": 6.76e-05,
"loss": 0.4022,
"step": 2650
},
{
"epoch": 60.0,
"grad_norm": 2.2635858058929443,
"learning_rate": 6.510000000000001e-05,
"loss": 0.4176,
"step": 2700
},
{
"epoch": 61.111111111111114,
"grad_norm": 5.780401706695557,
"learning_rate": 6.26e-05,
"loss": 0.4194,
"step": 2750
},
{
"epoch": 62.22222222222222,
"grad_norm": 1.2802194356918335,
"learning_rate": 6.0100000000000004e-05,
"loss": 0.3982,
"step": 2800
},
{
"epoch": 63.333333333333336,
"grad_norm": 1.1778165102005005,
"learning_rate": 5.76e-05,
"loss": 0.3994,
"step": 2850
},
{
"epoch": 64.44444444444444,
"grad_norm": 2.8410191535949707,
"learning_rate": 5.5100000000000004e-05,
"loss": 0.3988,
"step": 2900
},
{
"epoch": 65.55555555555556,
"grad_norm": 1.2655699253082275,
"learning_rate": 5.2600000000000005e-05,
"loss": 0.4005,
"step": 2950
},
{
"epoch": 66.66666666666667,
"grad_norm": 2.0291759967803955,
"learning_rate": 5.0100000000000005e-05,
"loss": 0.3961,
"step": 3000
},
{
"epoch": 66.66666666666667,
"eval_loss": 0.4118640422821045,
"eval_runtime": 6.7013,
"eval_samples_per_second": 23.727,
"eval_steps_per_second": 2.984,
"step": 3000
},
{
"epoch": 67.77777777777777,
"grad_norm": 1.4944819211959839,
"learning_rate": 4.76e-05,
"loss": 0.3923,
"step": 3050
},
{
"epoch": 68.88888888888889,
"grad_norm": 1.1773656606674194,
"learning_rate": 4.5100000000000005e-05,
"loss": 0.3951,
"step": 3100
},
{
"epoch": 70.0,
"grad_norm": 1.512491226196289,
"learning_rate": 4.26e-05,
"loss": 0.3947,
"step": 3150
},
{
"epoch": 71.11111111111111,
"grad_norm": 2.610377788543701,
"learning_rate": 4.0100000000000006e-05,
"loss": 0.3922,
"step": 3200
},
{
"epoch": 72.22222222222223,
"grad_norm": 1.6256699562072754,
"learning_rate": 3.76e-05,
"loss": 0.3944,
"step": 3250
},
{
"epoch": 73.33333333333333,
"grad_norm": 1.380975604057312,
"learning_rate": 3.51e-05,
"loss": 0.3851,
"step": 3300
},
{
"epoch": 74.44444444444444,
"grad_norm": 1.1550930738449097,
"learning_rate": 3.26e-05,
"loss": 0.3888,
"step": 3350
},
{
"epoch": 75.55555555555556,
"grad_norm": 4.280844688415527,
"learning_rate": 3.01e-05,
"loss": 0.3916,
"step": 3400
},
{
"epoch": 76.66666666666667,
"grad_norm": 1.0760613679885864,
"learning_rate": 2.7600000000000003e-05,
"loss": 0.386,
"step": 3450
},
{
"epoch": 77.77777777777777,
"grad_norm": 1.1752254962921143,
"learning_rate": 2.51e-05,
"loss": 0.394,
"step": 3500
},
{
"epoch": 77.77777777777777,
"eval_loss": 0.4066352844238281,
"eval_runtime": 6.9681,
"eval_samples_per_second": 22.818,
"eval_steps_per_second": 2.87,
"step": 3500
},
{
"epoch": 78.88888888888889,
"grad_norm": 2.5277838706970215,
"learning_rate": 2.26e-05,
"loss": 0.3853,
"step": 3550
},
{
"epoch": 80.0,
"grad_norm": 1.478399395942688,
"learning_rate": 2.01e-05,
"loss": 0.383,
"step": 3600
},
{
"epoch": 81.11111111111111,
"grad_norm": 1.141695261001587,
"learning_rate": 1.76e-05,
"loss": 0.3869,
"step": 3650
},
{
"epoch": 82.22222222222223,
"grad_norm": 1.0178661346435547,
"learning_rate": 1.51e-05,
"loss": 0.3844,
"step": 3700
},
{
"epoch": 83.33333333333333,
"grad_norm": 0.960378110408783,
"learning_rate": 1.2600000000000001e-05,
"loss": 0.3811,
"step": 3750
},
{
"epoch": 84.44444444444444,
"grad_norm": 1.0344784259796143,
"learning_rate": 1.0100000000000002e-05,
"loss": 0.3879,
"step": 3800
},
{
"epoch": 85.55555555555556,
"grad_norm": 1.1019142866134644,
"learning_rate": 7.6e-06,
"loss": 0.3846,
"step": 3850
},
{
"epoch": 86.66666666666667,
"grad_norm": 0.8295585513114929,
"learning_rate": 5.1e-06,
"loss": 0.3814,
"step": 3900
},
{
"epoch": 87.77777777777777,
"grad_norm": 0.9959574937820435,
"learning_rate": 2.6e-06,
"loss": 0.3802,
"step": 3950
},
{
"epoch": 88.88888888888889,
"grad_norm": 0.9421882033348083,
"learning_rate": 1.0000000000000001e-07,
"loss": 0.3791,
"step": 4000
},
{
"epoch": 88.88888888888889,
"eval_loss": 0.40672534704208374,
"eval_runtime": 7.825,
"eval_samples_per_second": 20.319,
"eval_steps_per_second": 2.556,
"step": 4000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 89,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7567064523395584e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}