fil_b32_le4_s4000 / checkpoint-2000 / trainer_state.json
{
"best_metric": 0.41924577951431274,
"best_model_checkpoint": "mikhail-panzo/fil_b32_le4_s4000/checkpoint-1500",
"epoch": 44.44444444444444,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.1111111111111112,
"grad_norm": 1.5478250980377197,
"learning_rate": 2.5e-06,
"loss": 0.7933,
"step": 50
},
{
"epoch": 2.2222222222222223,
"grad_norm": 2.5992791652679443,
"learning_rate": 5e-06,
"loss": 0.7126,
"step": 100
},
{
"epoch": 3.3333333333333335,
"grad_norm": 4.512153625488281,
"learning_rate": 7.45e-06,
"loss": 0.6643,
"step": 150
},
{
"epoch": 4.444444444444445,
"grad_norm": 5.529618740081787,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6161,
"step": 200
},
{
"epoch": 5.555555555555555,
"grad_norm": 1.547269344329834,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.5572,
"step": 250
},
{
"epoch": 6.666666666666667,
"grad_norm": 1.7310105562210083,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5307,
"step": 300
},
{
"epoch": 7.777777777777778,
"grad_norm": 4.040494441986084,
"learning_rate": 1.745e-05,
"loss": 0.5259,
"step": 350
},
{
"epoch": 8.88888888888889,
"grad_norm": 1.6819641590118408,
"learning_rate": 1.995e-05,
"loss": 0.4981,
"step": 400
},
{
"epoch": 10.0,
"grad_norm": 3.428649663925171,
"learning_rate": 2.245e-05,
"loss": 0.5008,
"step": 450
},
{
"epoch": 11.11111111111111,
"grad_norm": 2.2339069843292236,
"learning_rate": 2.495e-05,
"loss": 0.4923,
"step": 500
},
{
"epoch": 11.11111111111111,
"eval_loss": 0.445181667804718,
"eval_runtime": 8.1426,
"eval_samples_per_second": 19.527,
"eval_steps_per_second": 2.456,
"step": 500
},
{
"epoch": 12.222222222222221,
"grad_norm": 3.6943907737731934,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.4999,
"step": 550
},
{
"epoch": 13.333333333333334,
"grad_norm": 2.8963587284088135,
"learning_rate": 2.995e-05,
"loss": 0.4723,
"step": 600
},
{
"epoch": 14.444444444444445,
"grad_norm": 2.157615900039673,
"learning_rate": 3.245e-05,
"loss": 0.4771,
"step": 650
},
{
"epoch": 15.555555555555555,
"grad_norm": 4.2828450202941895,
"learning_rate": 3.495e-05,
"loss": 0.4656,
"step": 700
},
{
"epoch": 16.666666666666668,
"grad_norm": 2.30914044380188,
"learning_rate": 3.745e-05,
"loss": 0.4681,
"step": 750
},
{
"epoch": 17.77777777777778,
"grad_norm": 1.7412217855453491,
"learning_rate": 3.995e-05,
"loss": 0.4639,
"step": 800
},
{
"epoch": 18.88888888888889,
"grad_norm": 2.457113742828369,
"learning_rate": 4.245e-05,
"loss": 0.4691,
"step": 850
},
{
"epoch": 20.0,
"grad_norm": 2.960203170776367,
"learning_rate": 4.495e-05,
"loss": 0.4569,
"step": 900
},
{
"epoch": 21.11111111111111,
"grad_norm": 3.9787750244140625,
"learning_rate": 4.745e-05,
"loss": 0.4547,
"step": 950
},
{
"epoch": 22.22222222222222,
"grad_norm": 1.6046252250671387,
"learning_rate": 4.995e-05,
"loss": 0.4488,
"step": 1000
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.42208045721054077,
"eval_runtime": 7.5792,
"eval_samples_per_second": 20.978,
"eval_steps_per_second": 2.639,
"step": 1000
},
{
"epoch": 23.333333333333332,
"grad_norm": 2.2585225105285645,
"learning_rate": 5.245e-05,
"loss": 0.4537,
"step": 1050
},
{
"epoch": 24.444444444444443,
"grad_norm": 2.7472567558288574,
"learning_rate": 5.495e-05,
"loss": 0.4438,
"step": 1100
},
{
"epoch": 25.555555555555557,
"grad_norm": 2.1399290561676025,
"learning_rate": 5.745e-05,
"loss": 0.4474,
"step": 1150
},
{
"epoch": 26.666666666666668,
"grad_norm": 2.671912670135498,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4488,
"step": 1200
},
{
"epoch": 27.77777777777778,
"grad_norm": 4.835938453674316,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4472,
"step": 1250
},
{
"epoch": 28.88888888888889,
"grad_norm": 5.086827754974365,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4526,
"step": 1300
},
{
"epoch": 30.0,
"grad_norm": 2.293753147125244,
"learning_rate": 6.745e-05,
"loss": 0.4448,
"step": 1350
},
{
"epoch": 31.11111111111111,
"grad_norm": 3.609741449356079,
"learning_rate": 6.995e-05,
"loss": 0.438,
"step": 1400
},
{
"epoch": 32.22222222222222,
"grad_norm": 1.580810546875,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4446,
"step": 1450
},
{
"epoch": 33.333333333333336,
"grad_norm": 2.7843821048736572,
"learning_rate": 7.495e-05,
"loss": 0.4413,
"step": 1500
},
{
"epoch": 33.333333333333336,
"eval_loss": 0.41924577951431274,
"eval_runtime": 7.8331,
"eval_samples_per_second": 20.298,
"eval_steps_per_second": 2.553,
"step": 1500
},
{
"epoch": 34.44444444444444,
"grad_norm": 5.7151875495910645,
"learning_rate": 7.745e-05,
"loss": 0.4369,
"step": 1550
},
{
"epoch": 35.55555555555556,
"grad_norm": 2.420909881591797,
"learning_rate": 7.995e-05,
"loss": 0.4491,
"step": 1600
},
{
"epoch": 36.666666666666664,
"grad_norm": 3.388465642929077,
"learning_rate": 8.245e-05,
"loss": 0.4381,
"step": 1650
},
{
"epoch": 37.77777777777778,
"grad_norm": 1.8416664600372314,
"learning_rate": 8.495e-05,
"loss": 0.4332,
"step": 1700
},
{
"epoch": 38.888888888888886,
"grad_norm": 4.991604328155518,
"learning_rate": 8.745000000000001e-05,
"loss": 0.4363,
"step": 1750
},
{
"epoch": 40.0,
"grad_norm": 1.6002848148345947,
"learning_rate": 8.995e-05,
"loss": 0.4296,
"step": 1800
},
{
"epoch": 41.111111111111114,
"grad_norm": 2.4206886291503906,
"learning_rate": 9.245e-05,
"loss": 0.4273,
"step": 1850
},
{
"epoch": 42.22222222222222,
"grad_norm": 3.848668098449707,
"learning_rate": 9.495e-05,
"loss": 0.4244,
"step": 1900
},
{
"epoch": 43.333333333333336,
"grad_norm": 3.4843313694000244,
"learning_rate": 9.745000000000001e-05,
"loss": 0.4213,
"step": 1950
},
{
"epoch": 44.44444444444444,
"grad_norm": 7.2384772300720215,
"learning_rate": 9.995e-05,
"loss": 0.4318,
"step": 2000
},
{
"epoch": 44.44444444444444,
"eval_loss": 0.42309698462486267,
"eval_runtime": 7.2889,
"eval_samples_per_second": 21.814,
"eval_steps_per_second": 2.744,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 89,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.377214139372544e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
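
For readers inspecting this checkpoint, below is a minimal Python sketch of how the state above can be read back and summarized; it is an illustration, not part of the checkpoint. The local file name trainer_state.json is an assumption, and only keys that actually appear in this file are used.

import json

# Assumption: the JSON above has been saved locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and evaluation entries (with "eval_loss").
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best_metric (eval_loss): {state['best_metric']}")
print(f"best checkpoint:         {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f} (epoch {e['epoch']:.2f})")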