ceb_b128_le3_s4000/checkpoint-2000/trainer_state.json
{
"best_metric": 0.40509888529777527,
"best_model_checkpoint": "mikhail_panzo/ceb_b128_le3_s4000/checkpoint-500",
"epoch": 158.41584158415841,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.9603960396039604,
"grad_norm": 1.6000345945358276,
"learning_rate": 2.5e-05,
"loss": 0.7102,
"step": 50
},
{
"epoch": 7.920792079207921,
"grad_norm": 1.240767478942871,
"learning_rate": 5e-05,
"loss": 0.5129,
"step": 100
},
{
"epoch": 11.881188118811881,
"grad_norm": 2.545292377471924,
"learning_rate": 7.5e-05,
"loss": 0.4701,
"step": 150
},
{
"epoch": 15.841584158415841,
"grad_norm": 3.054067611694336,
"learning_rate": 0.0001,
"loss": 0.4502,
"step": 200
},
{
"epoch": 19.801980198019802,
"grad_norm": 3.884960889816284,
"learning_rate": 0.000125,
"loss": 0.4433,
"step": 250
},
{
"epoch": 23.762376237623762,
"grad_norm": 2.102992296218872,
"learning_rate": 0.00015,
"loss": 0.4361,
"step": 300
},
{
"epoch": 27.722772277227723,
"grad_norm": 3.1947975158691406,
"learning_rate": 0.000175,
"loss": 0.4335,
"step": 350
},
{
"epoch": 31.683168316831683,
"grad_norm": 4.315099716186523,
"learning_rate": 0.0002,
"loss": 0.4322,
"step": 400
},
{
"epoch": 35.64356435643565,
"grad_norm": 6.909496784210205,
"learning_rate": 0.00022500000000000002,
"loss": 0.4346,
"step": 450
},
{
"epoch": 39.603960396039604,
"grad_norm": 2.0800621509552,
"learning_rate": 0.00025,
"loss": 0.42,
"step": 500
},
{
"epoch": 39.603960396039604,
"eval_loss": 0.40509888529777527,
"eval_runtime": 8.3422,
"eval_samples_per_second": 21.577,
"eval_steps_per_second": 2.757,
"step": 500
},
{
"epoch": 43.56435643564357,
"grad_norm": 5.034981727600098,
"learning_rate": 0.000275,
"loss": 0.4336,
"step": 550
},
{
"epoch": 47.524752475247524,
"grad_norm": 6.329326152801514,
"learning_rate": 0.0003,
"loss": 0.4265,
"step": 600
},
{
"epoch": 51.48514851485149,
"grad_norm": 2.624539852142334,
"learning_rate": 0.00032500000000000004,
"loss": 0.4249,
"step": 650
},
{
"epoch": 55.445544554455445,
"grad_norm": 1.945346474647522,
"learning_rate": 0.00035,
"loss": 0.4155,
"step": 700
},
{
"epoch": 59.40594059405941,
"grad_norm": 3.682614326477051,
"learning_rate": 0.000375,
"loss": 0.4117,
"step": 750
},
{
"epoch": 63.366336633663366,
"grad_norm": 1.726725459098816,
"learning_rate": 0.0004,
"loss": 0.4139,
"step": 800
},
{
"epoch": 67.32673267326733,
"grad_norm": 1.665935754776001,
"learning_rate": 0.000425,
"loss": 0.4138,
"step": 850
},
{
"epoch": 71.2871287128713,
"grad_norm": 2.6523916721343994,
"learning_rate": 0.00045000000000000004,
"loss": 0.4098,
"step": 900
},
{
"epoch": 75.24752475247524,
"grad_norm": 4.059382915496826,
"learning_rate": 0.000475,
"loss": 0.4075,
"step": 950
},
{
"epoch": 79.20792079207921,
"grad_norm": 4.141351222991943,
"learning_rate": 0.0005,
"loss": 0.4187,
"step": 1000
},
{
"epoch": 79.20792079207921,
"eval_loss": 0.44087010622024536,
"eval_runtime": 7.9097,
"eval_samples_per_second": 22.757,
"eval_steps_per_second": 2.908,
"step": 1000
},
{
"epoch": 83.16831683168317,
"grad_norm": 4.268443584442139,
"learning_rate": 0.0005250000000000001,
"loss": 0.4233,
"step": 1050
},
{
"epoch": 87.12871287128714,
"grad_norm": 2.5200088024139404,
"learning_rate": 0.00055,
"loss": 0.4165,
"step": 1100
},
{
"epoch": 91.08910891089108,
"grad_norm": 1.1965175867080688,
"learning_rate": 0.000575,
"loss": 0.4131,
"step": 1150
},
{
"epoch": 95.04950495049505,
"grad_norm": 5.397756099700928,
"learning_rate": 0.0006,
"loss": 0.4209,
"step": 1200
},
{
"epoch": 99.00990099009901,
"grad_norm": 4.361856460571289,
"learning_rate": 0.000625,
"loss": 0.4289,
"step": 1250
},
{
"epoch": 102.97029702970298,
"grad_norm": 8.962231636047363,
"learning_rate": 0.0006500000000000001,
"loss": 0.4418,
"step": 1300
},
{
"epoch": 106.93069306930693,
"grad_norm": 5.27103328704834,
"learning_rate": 0.000675,
"loss": 0.4297,
"step": 1350
},
{
"epoch": 110.89108910891089,
"grad_norm": 5.550845623016357,
"learning_rate": 0.0007,
"loss": 0.4394,
"step": 1400
},
{
"epoch": 114.85148514851485,
"grad_norm": 1.9186675548553467,
"learning_rate": 0.000725,
"loss": 0.436,
"step": 1450
},
{
"epoch": 118.81188118811882,
"grad_norm": 2.9912400245666504,
"learning_rate": 0.00075,
"loss": 0.4401,
"step": 1500
},
{
"epoch": 118.81188118811882,
"eval_loss": 0.4779699444770813,
"eval_runtime": 7.8042,
"eval_samples_per_second": 23.065,
"eval_steps_per_second": 2.947,
"step": 1500
},
{
"epoch": 122.77227722772277,
"grad_norm": 2.4975500106811523,
"learning_rate": 0.0007750000000000001,
"loss": 0.4325,
"step": 1550
},
{
"epoch": 126.73267326732673,
"grad_norm": 4.072780609130859,
"learning_rate": 0.0008,
"loss": 0.4258,
"step": 1600
},
{
"epoch": 130.69306930693068,
"grad_norm": 8.71650218963623,
"learning_rate": 0.000825,
"loss": 0.427,
"step": 1650
},
{
"epoch": 134.65346534653466,
"grad_norm": 3.707995653152466,
"learning_rate": 0.00085,
"loss": 0.4227,
"step": 1700
},
{
"epoch": 138.6138613861386,
"grad_norm": 1.5397919416427612,
"learning_rate": 0.000875,
"loss": 0.4194,
"step": 1750
},
{
"epoch": 142.5742574257426,
"grad_norm": 3.601444959640503,
"learning_rate": 0.0009000000000000001,
"loss": 0.4234,
"step": 1800
},
{
"epoch": 146.53465346534654,
"grad_norm": 4.232907772064209,
"learning_rate": 0.000925,
"loss": 0.4276,
"step": 1850
},
{
"epoch": 150.4950495049505,
"grad_norm": 8.078680038452148,
"learning_rate": 0.00095,
"loss": 0.4453,
"step": 1900
},
{
"epoch": 154.45544554455446,
"grad_norm": 1.406493067741394,
"learning_rate": 0.000975,
"loss": 0.4428,
"step": 1950
},
{
"epoch": 158.41584158415841,
"grad_norm": 3.9557948112487793,
"learning_rate": 0.001,
"loss": 0.4456,
"step": 2000
},
{
"epoch": 158.41584158415841,
"eval_loss": 0.4567292630672455,
"eval_runtime": 8.3256,
"eval_samples_per_second": 21.62,
"eval_steps_per_second": 2.763,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 334,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.322114324975938e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}