{
"best_metric": 0.4203202724456787,
"best_model_checkpoint": "mikhail_panzo/fil_b64_le4_s4000/checkpoint-1000",
"epoch": 44.44444444444444,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.2222222222222223,
"grad_norm": 3.1291983127593994,
"learning_rate": 2.5e-06,
"loss": 0.7845,
"step": 50
},
{
"epoch": 4.444444444444445,
"grad_norm": 1.4106616973876953,
"learning_rate": 5e-06,
"loss": 0.7033,
"step": 100
},
{
"epoch": 6.666666666666667,
"grad_norm": 3.4416260719299316,
"learning_rate": 7.5e-06,
"loss": 0.6535,
"step": 150
},
{
"epoch": 8.88888888888889,
"grad_norm": 1.7668957710266113,
"learning_rate": 1e-05,
"loss": 0.5726,
"step": 200
},
{
"epoch": 11.11111111111111,
"grad_norm": 1.3341814279556274,
"learning_rate": 1.25e-05,
"loss": 0.5312,
"step": 250
},
{
"epoch": 13.333333333333334,
"grad_norm": 1.4352439641952515,
"learning_rate": 1.5e-05,
"loss": 0.5094,
"step": 300
},
{
"epoch": 15.555555555555555,
"grad_norm": 1.0031296014785767,
"learning_rate": 1.75e-05,
"loss": 0.495,
"step": 350
},
{
"epoch": 17.77777777777778,
"grad_norm": 3.518950939178467,
"learning_rate": 2e-05,
"loss": 0.489,
"step": 400
},
{
"epoch": 20.0,
"grad_norm": 2.1400034427642822,
"learning_rate": 2.25e-05,
"loss": 0.4816,
"step": 450
},
{
"epoch": 22.22222222222222,
"grad_norm": 1.7150920629501343,
"learning_rate": 2.5e-05,
"loss": 0.4725,
"step": 500
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.4371508061885834,
"eval_runtime": 8.4206,
"eval_samples_per_second": 18.882,
"eval_steps_per_second": 2.375,
"step": 500
},
{
"epoch": 24.444444444444443,
"grad_norm": 1.6130620241165161,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.4607,
"step": 550
},
{
"epoch": 26.666666666666668,
"grad_norm": 2.737618923187256,
"learning_rate": 3e-05,
"loss": 0.4634,
"step": 600
},
{
"epoch": 28.88888888888889,
"grad_norm": 3.0077686309814453,
"learning_rate": 3.2500000000000004e-05,
"loss": 0.4621,
"step": 650
},
{
"epoch": 31.11111111111111,
"grad_norm": 1.4074121713638306,
"learning_rate": 3.5e-05,
"loss": 0.4611,
"step": 700
},
{
"epoch": 33.333333333333336,
"grad_norm": 2.665407657623291,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.4557,
"step": 750
},
{
"epoch": 35.55555555555556,
"grad_norm": 2.857210874557495,
"learning_rate": 4e-05,
"loss": 0.4493,
"step": 800
},
{
"epoch": 37.77777777777778,
"grad_norm": 1.7210990190505981,
"learning_rate": 4.25e-05,
"loss": 0.4489,
"step": 850
},
{
"epoch": 40.0,
"grad_norm": 1.4690616130828857,
"learning_rate": 4.5e-05,
"loss": 0.4494,
"step": 900
},
{
"epoch": 42.22222222222222,
"grad_norm": 1.6961876153945923,
"learning_rate": 4.75e-05,
"loss": 0.4451,
"step": 950
},
{
"epoch": 44.44444444444444,
"grad_norm": 1.8849211931228638,
"learning_rate": 5e-05,
"loss": 0.4415,
"step": 1000
},
{
"epoch": 44.44444444444444,
"eval_loss": 0.4203202724456787,
"eval_runtime": 7.7871,
"eval_samples_per_second": 20.418,
"eval_steps_per_second": 2.568,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 182,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3737902966666208e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}