gemma-2-logql/checkpoint-52/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 6,
"global_step": 52,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.038461538461538464,
"grad_norm": 8.71969985961914,
"learning_rate": 1e-05,
"loss": 1.7925,
"step": 1
},
{
"epoch": 0.038461538461538464,
"eval_loss": 2.0411531925201416,
"eval_runtime": 1.6566,
"eval_samples_per_second": 13.28,
"eval_steps_per_second": 1.207,
"step": 1
},
{
"epoch": 0.07692307692307693,
"grad_norm": 4.587398052215576,
"learning_rate": 2e-05,
"loss": 2.1894,
"step": 2
},
{
"epoch": 0.11538461538461539,
"grad_norm": 4.494429111480713,
"learning_rate": 3e-05,
"loss": 1.9438,
"step": 3
},
{
"epoch": 0.15384615384615385,
"grad_norm": 7.78571891784668,
"learning_rate": 4e-05,
"loss": 1.9463,
"step": 4
},
{
"epoch": 0.19230769230769232,
"grad_norm": 6.792930603027344,
"learning_rate": 5e-05,
"loss": 1.8652,
"step": 5
},
{
"epoch": 0.23076923076923078,
"grad_norm": 10.765946388244629,
"learning_rate": 6e-05,
"loss": 1.6872,
"step": 6
},
{
"epoch": 0.23076923076923078,
"eval_loss": 1.6089171171188354,
"eval_runtime": 1.667,
"eval_samples_per_second": 13.198,
"eval_steps_per_second": 1.2,
"step": 6
},
{
"epoch": 0.2692307692307692,
"grad_norm": 3.0110535621643066,
"learning_rate": 7e-05,
"loss": 1.6877,
"step": 7
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.871809959411621,
"learning_rate": 8e-05,
"loss": 1.4855,
"step": 8
},
{
"epoch": 0.34615384615384615,
"grad_norm": 2.223036050796509,
"learning_rate": 9e-05,
"loss": 1.2108,
"step": 9
},
{
"epoch": 0.38461538461538464,
"grad_norm": 2.2891507148742676,
"learning_rate": 0.0001,
"loss": 1.0259,
"step": 10
},
{
"epoch": 0.4230769230769231,
"grad_norm": 2.058546543121338,
"learning_rate": 9.997207818651274e-05,
"loss": 0.8116,
"step": 11
},
{
"epoch": 0.46153846153846156,
"grad_norm": 2.8107495307922363,
"learning_rate": 9.988834393115767e-05,
"loss": 0.6967,
"step": 12
},
{
"epoch": 0.46153846153846156,
"eval_loss": 0.6327503323554993,
"eval_runtime": 1.6315,
"eval_samples_per_second": 13.485,
"eval_steps_per_second": 1.226,
"step": 12
},
{
"epoch": 0.5,
"grad_norm": 2.0721328258514404,
"learning_rate": 9.974889075442521e-05,
"loss": 0.5983,
"step": 13
},
{
"epoch": 0.5384615384615384,
"grad_norm": 1.9303380250930786,
"learning_rate": 9.9553874407739e-05,
"loss": 0.4963,
"step": 14
},
{
"epoch": 0.5769230769230769,
"grad_norm": 1.40817391872406,
"learning_rate": 9.930351269950143e-05,
"loss": 0.3881,
"step": 15
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.4169831275939941,
"learning_rate": 9.899808525182935e-05,
"loss": 0.3139,
"step": 16
},
{
"epoch": 0.6538461538461539,
"grad_norm": 0.9847315549850464,
"learning_rate": 9.863793318825186e-05,
"loss": 0.2776,
"step": 17
},
{
"epoch": 0.6923076923076923,
"grad_norm": 1.2575572729110718,
"learning_rate": 9.822345875271883e-05,
"loss": 0.3327,
"step": 18
},
{
"epoch": 0.6923076923076923,
"eval_loss": 0.2710803747177124,
"eval_runtime": 1.6739,
"eval_samples_per_second": 13.143,
"eval_steps_per_second": 1.195,
"step": 18
},
{
"epoch": 0.7307692307692307,
"grad_norm": 0.7373743057250977,
"learning_rate": 9.775512486034563e-05,
"loss": 0.1967,
"step": 19
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.8841055035591125,
"learning_rate": 9.723345458039594e-05,
"loss": 0.244,
"step": 20
},
{
"epoch": 0.8076923076923077,
"grad_norm": 0.8621382713317871,
"learning_rate": 9.665903055208014e-05,
"loss": 0.1927,
"step": 21
},
{
"epoch": 0.8461538461538461,
"grad_norm": 0.9871663451194763,
"learning_rate": 9.603249433382144e-05,
"loss": 0.2435,
"step": 22
},
{
"epoch": 0.8846153846153846,
"grad_norm": 0.6780880689620972,
"learning_rate": 9.535454568671704e-05,
"loss": 0.1498,
"step": 23
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.7006078958511353,
"learning_rate": 9.462594179299406e-05,
"loss": 0.1784,
"step": 24
},
{
"epoch": 0.9230769230769231,
"eval_loss": 0.17325076460838318,
"eval_runtime": 1.6307,
"eval_samples_per_second": 13.491,
"eval_steps_per_second": 1.226,
"step": 24
},
{
"epoch": 0.9615384615384616,
"grad_norm": 0.69418865442276,
"learning_rate": 9.384749641033359e-05,
"loss": 0.2146,
"step": 25
},
{
"epoch": 1.0,
"grad_norm": 0.5629783272743225,
"learning_rate": 9.302007896300698e-05,
"loss": 0.1474,
"step": 26
},
{
"epoch": 1.0384615384615385,
"grad_norm": 0.5029877424240112,
"learning_rate": 9.214461357083985e-05,
"loss": 0.1358,
"step": 27
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.5899357199668884,
"learning_rate": 9.122207801708802e-05,
"loss": 0.134,
"step": 28
},
{
"epoch": 1.1153846153846154,
"grad_norm": 0.7418510913848877,
"learning_rate": 9.025350265637815e-05,
"loss": 0.1158,
"step": 29
},
{
"epoch": 1.1538461538461537,
"grad_norm": 0.7511109113693237,
"learning_rate": 8.923996926393305e-05,
"loss": 0.1136,
"step": 30
},
{
"epoch": 1.1538461538461537,
"eval_loss": 0.1189715564250946,
"eval_runtime": 1.672,
"eval_samples_per_second": 13.158,
"eval_steps_per_second": 1.196,
"step": 30
},
{
"epoch": 1.1923076923076923,
"grad_norm": 0.7893090844154358,
"learning_rate": 8.818260982736661e-05,
"loss": 0.1249,
"step": 31
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.5709223747253418,
"learning_rate": 8.708260528239788e-05,
"loss": 0.0824,
"step": 32
},
{
"epoch": 1.2692307692307692,
"grad_norm": 0.5314195156097412,
"learning_rate": 8.594118419389647e-05,
"loss": 0.0906,
"step": 33
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.5469984412193298,
"learning_rate": 8.475962138373213e-05,
"loss": 0.0918,
"step": 34
},
{
"epoch": 1.3461538461538463,
"grad_norm": 0.5641104578971863,
"learning_rate": 8.353923650696118e-05,
"loss": 0.0866,
"step": 35
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.5818964838981628,
"learning_rate": 8.228139257794012e-05,
"loss": 0.0891,
"step": 36
},
{
"epoch": 1.3846153846153846,
"eval_loss": 0.08501879870891571,
"eval_runtime": 1.6676,
"eval_samples_per_second": 13.193,
"eval_steps_per_second": 1.199,
"step": 36
},
{
"epoch": 1.4230769230769231,
"grad_norm": 0.7663794159889221,
"learning_rate": 8.098749444801224e-05,
"loss": 0.0934,
"step": 37
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.48853516578674316,
"learning_rate": 7.965898723646776e-05,
"loss": 0.0597,
"step": 38
},
{
"epoch": 1.5,
"grad_norm": 0.608406662940979,
"learning_rate": 7.829735471652978e-05,
"loss": 0.0579,
"step": 39
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.49656111001968384,
"learning_rate": 7.690411765816864e-05,
"loss": 0.0607,
"step": 40
},
{
"epoch": 1.5769230769230769,
"grad_norm": 0.6709762215614319,
"learning_rate": 7.548083212959588e-05,
"loss": 0.0761,
"step": 41
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.6458861827850342,
"learning_rate": 7.402908775933419e-05,
"loss": 0.0746,
"step": 42
},
{
"epoch": 1.6153846153846154,
"eval_loss": 0.06257763504981995,
"eval_runtime": 1.6397,
"eval_samples_per_second": 13.417,
"eval_steps_per_second": 1.22,
"step": 42
},
{
"epoch": 1.6538461538461537,
"grad_norm": 0.527205765247345,
"learning_rate": 7.255050596080509e-05,
"loss": 0.0597,
"step": 43
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.4971027672290802,
"learning_rate": 7.104673812141675e-05,
"loss": 0.0574,
"step": 44
},
{
"epoch": 1.7307692307692308,
"grad_norm": 0.521812915802002,
"learning_rate": 6.951946375817474e-05,
"loss": 0.0611,
"step": 45
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.49732235074043274,
"learning_rate": 6.797038864187564e-05,
"loss": 0.0411,
"step": 46
},
{
"epoch": 1.8076923076923077,
"grad_norm": 0.4331294000148773,
"learning_rate": 6.640124289197845e-05,
"loss": 0.0377,
"step": 47
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.5265694260597229,
"learning_rate": 6.481377904428171e-05,
"loss": 0.0522,
"step": 48
},
{
"epoch": 1.8461538461538463,
"eval_loss": 0.046460047364234924,
"eval_runtime": 1.6754,
"eval_samples_per_second": 13.131,
"eval_steps_per_second": 1.194,
"step": 48
},
{
"epoch": 1.8846153846153846,
"grad_norm": 0.5977218747138977,
"learning_rate": 6.320977009356431e-05,
"loss": 0.0543,
"step": 49
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.4617597758769989,
"learning_rate": 6.159100751337642e-05,
"loss": 0.0388,
"step": 50
},
{
"epoch": 1.9615384615384617,
"grad_norm": 0.4888545572757721,
"learning_rate": 5.99592992551918e-05,
"loss": 0.0377,
"step": 51
},
{
"epoch": 2.0,
"grad_norm": 0.43416959047317505,
"learning_rate": 5.831646772915651e-05,
"loss": 0.0326,
"step": 52
}
],
"logging_steps": 1,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.6914539966562304e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}