gemma-2-logql / checkpoint-78 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 6,
"global_step": 78,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.038461538461538464,
"grad_norm": 8.71969985961914,
"learning_rate": 1e-05,
"loss": 1.7925,
"step": 1
},
{
"epoch": 0.038461538461538464,
"eval_loss": 2.0411531925201416,
"eval_runtime": 1.6566,
"eval_samples_per_second": 13.28,
"eval_steps_per_second": 1.207,
"step": 1
},
{
"epoch": 0.07692307692307693,
"grad_norm": 4.587398052215576,
"learning_rate": 2e-05,
"loss": 2.1894,
"step": 2
},
{
"epoch": 0.11538461538461539,
"grad_norm": 4.494429111480713,
"learning_rate": 3e-05,
"loss": 1.9438,
"step": 3
},
{
"epoch": 0.15384615384615385,
"grad_norm": 7.78571891784668,
"learning_rate": 4e-05,
"loss": 1.9463,
"step": 4
},
{
"epoch": 0.19230769230769232,
"grad_norm": 6.792930603027344,
"learning_rate": 5e-05,
"loss": 1.8652,
"step": 5
},
{
"epoch": 0.23076923076923078,
"grad_norm": 10.765946388244629,
"learning_rate": 6e-05,
"loss": 1.6872,
"step": 6
},
{
"epoch": 0.23076923076923078,
"eval_loss": 1.6089171171188354,
"eval_runtime": 1.667,
"eval_samples_per_second": 13.198,
"eval_steps_per_second": 1.2,
"step": 6
},
{
"epoch": 0.2692307692307692,
"grad_norm": 3.0110535621643066,
"learning_rate": 7e-05,
"loss": 1.6877,
"step": 7
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.871809959411621,
"learning_rate": 8e-05,
"loss": 1.4855,
"step": 8
},
{
"epoch": 0.34615384615384615,
"grad_norm": 2.223036050796509,
"learning_rate": 9e-05,
"loss": 1.2108,
"step": 9
},
{
"epoch": 0.38461538461538464,
"grad_norm": 2.2891507148742676,
"learning_rate": 0.0001,
"loss": 1.0259,
"step": 10
},
{
"epoch": 0.4230769230769231,
"grad_norm": 2.058546543121338,
"learning_rate": 9.997207818651274e-05,
"loss": 0.8116,
"step": 11
},
{
"epoch": 0.46153846153846156,
"grad_norm": 2.8107495307922363,
"learning_rate": 9.988834393115767e-05,
"loss": 0.6967,
"step": 12
},
{
"epoch": 0.46153846153846156,
"eval_loss": 0.6327503323554993,
"eval_runtime": 1.6315,
"eval_samples_per_second": 13.485,
"eval_steps_per_second": 1.226,
"step": 12
},
{
"epoch": 0.5,
"grad_norm": 2.0721328258514404,
"learning_rate": 9.974889075442521e-05,
"loss": 0.5983,
"step": 13
},
{
"epoch": 0.5384615384615384,
"grad_norm": 1.9303380250930786,
"learning_rate": 9.9553874407739e-05,
"loss": 0.4963,
"step": 14
},
{
"epoch": 0.5769230769230769,
"grad_norm": 1.40817391872406,
"learning_rate": 9.930351269950143e-05,
"loss": 0.3881,
"step": 15
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.4169831275939941,
"learning_rate": 9.899808525182935e-05,
"loss": 0.3139,
"step": 16
},
{
"epoch": 0.6538461538461539,
"grad_norm": 0.9847315549850464,
"learning_rate": 9.863793318825186e-05,
"loss": 0.2776,
"step": 17
},
{
"epoch": 0.6923076923076923,
"grad_norm": 1.2575572729110718,
"learning_rate": 9.822345875271883e-05,
"loss": 0.3327,
"step": 18
},
{
"epoch": 0.6923076923076923,
"eval_loss": 0.2710803747177124,
"eval_runtime": 1.6739,
"eval_samples_per_second": 13.143,
"eval_steps_per_second": 1.195,
"step": 18
},
{
"epoch": 0.7307692307692307,
"grad_norm": 0.7373743057250977,
"learning_rate": 9.775512486034563e-05,
"loss": 0.1967,
"step": 19
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.8841055035591125,
"learning_rate": 9.723345458039594e-05,
"loss": 0.244,
"step": 20
},
{
"epoch": 0.8076923076923077,
"grad_norm": 0.8621382713317871,
"learning_rate": 9.665903055208014e-05,
"loss": 0.1927,
"step": 21
},
{
"epoch": 0.8461538461538461,
"grad_norm": 0.9871663451194763,
"learning_rate": 9.603249433382144e-05,
"loss": 0.2435,
"step": 22
},
{
"epoch": 0.8846153846153846,
"grad_norm": 0.6780880689620972,
"learning_rate": 9.535454568671704e-05,
"loss": 0.1498,
"step": 23
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.7006078958511353,
"learning_rate": 9.462594179299406e-05,
"loss": 0.1784,
"step": 24
},
{
"epoch": 0.9230769230769231,
"eval_loss": 0.17325076460838318,
"eval_runtime": 1.6307,
"eval_samples_per_second": 13.491,
"eval_steps_per_second": 1.226,
"step": 24
},
{
"epoch": 0.9615384615384616,
"grad_norm": 0.69418865442276,
"learning_rate": 9.384749641033359e-05,
"loss": 0.2146,
"step": 25
},
{
"epoch": 1.0,
"grad_norm": 0.5629783272743225,
"learning_rate": 9.302007896300698e-05,
"loss": 0.1474,
"step": 26
},
{
"epoch": 1.0384615384615385,
"grad_norm": 0.5029877424240112,
"learning_rate": 9.214461357083985e-05,
"loss": 0.1358,
"step": 27
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.5899357199668884,
"learning_rate": 9.122207801708802e-05,
"loss": 0.134,
"step": 28
},
{
"epoch": 1.1153846153846154,
"grad_norm": 0.7418510913848877,
"learning_rate": 9.025350265637815e-05,
"loss": 0.1158,
"step": 29
},
{
"epoch": 1.1538461538461537,
"grad_norm": 0.7511109113693237,
"learning_rate": 8.923996926393305e-05,
"loss": 0.1136,
"step": 30
},
{
"epoch": 1.1538461538461537,
"eval_loss": 0.1189715564250946,
"eval_runtime": 1.672,
"eval_samples_per_second": 13.158,
"eval_steps_per_second": 1.196,
"step": 30
},
{
"epoch": 1.1923076923076923,
"grad_norm": 0.7893090844154358,
"learning_rate": 8.818260982736661e-05,
"loss": 0.1249,
"step": 31
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.5709223747253418,
"learning_rate": 8.708260528239788e-05,
"loss": 0.0824,
"step": 32
},
{
"epoch": 1.2692307692307692,
"grad_norm": 0.5314195156097412,
"learning_rate": 8.594118419389647e-05,
"loss": 0.0906,
"step": 33
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.5469984412193298,
"learning_rate": 8.475962138373213e-05,
"loss": 0.0918,
"step": 34
},
{
"epoch": 1.3461538461538463,
"grad_norm": 0.5641104578971863,
"learning_rate": 8.353923650696118e-05,
"loss": 0.0866,
"step": 35
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.5818964838981628,
"learning_rate": 8.228139257794012e-05,
"loss": 0.0891,
"step": 36
},
{
"epoch": 1.3846153846153846,
"eval_loss": 0.08501879870891571,
"eval_runtime": 1.6676,
"eval_samples_per_second": 13.193,
"eval_steps_per_second": 1.199,
"step": 36
},
{
"epoch": 1.4230769230769231,
"grad_norm": 0.7663794159889221,
"learning_rate": 8.098749444801224e-05,
"loss": 0.0934,
"step": 37
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.48853516578674316,
"learning_rate": 7.965898723646776e-05,
"loss": 0.0597,
"step": 38
},
{
"epoch": 1.5,
"grad_norm": 0.608406662940979,
"learning_rate": 7.829735471652978e-05,
"loss": 0.0579,
"step": 39
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.49656111001968384,
"learning_rate": 7.690411765816864e-05,
"loss": 0.0607,
"step": 40
},
{
"epoch": 1.5769230769230769,
"grad_norm": 0.6709762215614319,
"learning_rate": 7.548083212959588e-05,
"loss": 0.0761,
"step": 41
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.6458861827850342,
"learning_rate": 7.402908775933419e-05,
"loss": 0.0746,
"step": 42
},
{
"epoch": 1.6153846153846154,
"eval_loss": 0.06257763504981995,
"eval_runtime": 1.6397,
"eval_samples_per_second": 13.417,
"eval_steps_per_second": 1.22,
"step": 42
},
{
"epoch": 1.6538461538461537,
"grad_norm": 0.527205765247345,
"learning_rate": 7.255050596080509e-05,
"loss": 0.0597,
"step": 43
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.4971027672290802,
"learning_rate": 7.104673812141675e-05,
"loss": 0.0574,
"step": 44
},
{
"epoch": 1.7307692307692308,
"grad_norm": 0.521812915802002,
"learning_rate": 6.951946375817474e-05,
"loss": 0.0611,
"step": 45
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.49732235074043274,
"learning_rate": 6.797038864187564e-05,
"loss": 0.0411,
"step": 46
},
{
"epoch": 1.8076923076923077,
"grad_norm": 0.4331294000148773,
"learning_rate": 6.640124289197845e-05,
"loss": 0.0377,
"step": 47
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.5265694260597229,
"learning_rate": 6.481377904428171e-05,
"loss": 0.0522,
"step": 48
},
{
"epoch": 1.8461538461538463,
"eval_loss": 0.046460047364234924,
"eval_runtime": 1.6754,
"eval_samples_per_second": 13.131,
"eval_steps_per_second": 1.194,
"step": 48
},
{
"epoch": 1.8846153846153846,
"grad_norm": 0.5977218747138977,
"learning_rate": 6.320977009356431e-05,
"loss": 0.0543,
"step": 49
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.4617597758769989,
"learning_rate": 6.159100751337642e-05,
"loss": 0.0388,
"step": 50
},
{
"epoch": 1.9615384615384617,
"grad_norm": 0.4888545572757721,
"learning_rate": 5.99592992551918e-05,
"loss": 0.0377,
"step": 51
},
{
"epoch": 2.0,
"grad_norm": 0.43416959047317505,
"learning_rate": 5.831646772915651e-05,
"loss": 0.0326,
"step": 52
},
{
"epoch": 2.0384615384615383,
"grad_norm": 0.4446072578430176,
"learning_rate": 5.666434776868895e-05,
"loss": 0.03,
"step": 53
},
{
"epoch": 2.076923076923077,
"grad_norm": 0.569553792476654,
"learning_rate": 5.5004784581204927e-05,
"loss": 0.033,
"step": 54
},
{
"epoch": 2.076923076923077,
"eval_loss": 0.028210198506712914,
"eval_runtime": 1.7415,
"eval_samples_per_second": 12.633,
"eval_steps_per_second": 1.148,
"step": 54
},
{
"epoch": 2.1153846153846154,
"grad_norm": 0.4060160219669342,
"learning_rate": 5.3339631687256084e-05,
"loss": 0.0245,
"step": 55
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.48645609617233276,
"learning_rate": 5.167074885038373e-05,
"loss": 0.024,
"step": 56
},
{
"epoch": 2.1923076923076925,
"grad_norm": 0.5603619813919067,
"learning_rate": 5e-05,
"loss": 0.0257,
"step": 57
},
{
"epoch": 2.230769230769231,
"grad_norm": 0.43165791034698486,
"learning_rate": 4.832925114961629e-05,
"loss": 0.0207,
"step": 58
},
{
"epoch": 2.269230769230769,
"grad_norm": 0.48327693343162537,
"learning_rate": 4.666036831274392e-05,
"loss": 0.0308,
"step": 59
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.5035133957862854,
"learning_rate": 4.4995215418795085e-05,
"loss": 0.0333,
"step": 60
},
{
"epoch": 2.3076923076923075,
"eval_loss": 0.02246815524995327,
"eval_runtime": 1.6386,
"eval_samples_per_second": 13.426,
"eval_steps_per_second": 1.221,
"step": 60
},
{
"epoch": 2.3461538461538463,
"grad_norm": 0.38951441645622253,
"learning_rate": 4.333565223131107e-05,
"loss": 0.018,
"step": 61
},
{
"epoch": 2.3846153846153846,
"grad_norm": 0.5282712578773499,
"learning_rate": 4.1683532270843504e-05,
"loss": 0.0246,
"step": 62
},
{
"epoch": 2.423076923076923,
"grad_norm": 0.29735180735588074,
"learning_rate": 4.0040700744808204e-05,
"loss": 0.0165,
"step": 63
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.3622055649757385,
"learning_rate": 3.840899248662358e-05,
"loss": 0.0203,
"step": 64
},
{
"epoch": 2.5,
"grad_norm": 0.39045998454093933,
"learning_rate": 3.6790229906435705e-05,
"loss": 0.0197,
"step": 65
},
{
"epoch": 2.5384615384615383,
"grad_norm": 0.39309993386268616,
"learning_rate": 3.5186220955718306e-05,
"loss": 0.0171,
"step": 66
},
{
"epoch": 2.5384615384615383,
"eval_loss": 0.020282387733459473,
"eval_runtime": 1.6665,
"eval_samples_per_second": 13.202,
"eval_steps_per_second": 1.2,
"step": 66
},
{
"epoch": 2.5769230769230766,
"grad_norm": 0.3418659269809723,
"learning_rate": 3.3598757108021546e-05,
"loss": 0.0112,
"step": 67
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.4300234317779541,
"learning_rate": 3.202961135812437e-05,
"loss": 0.018,
"step": 68
},
{
"epoch": 2.6538461538461537,
"grad_norm": 0.34998640418052673,
"learning_rate": 3.0480536241825263e-05,
"loss": 0.014,
"step": 69
},
{
"epoch": 2.6923076923076925,
"grad_norm": 0.46385401487350464,
"learning_rate": 2.895326187858326e-05,
"loss": 0.0178,
"step": 70
},
{
"epoch": 2.730769230769231,
"grad_norm": 0.4280989468097687,
"learning_rate": 2.74494940391949e-05,
"loss": 0.0195,
"step": 71
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.337554395198822,
"learning_rate": 2.5970912240665813e-05,
"loss": 0.0172,
"step": 72
},
{
"epoch": 2.769230769230769,
"eval_loss": 0.014381876215338707,
"eval_runtime": 1.6934,
"eval_samples_per_second": 12.991,
"eval_steps_per_second": 1.181,
"step": 72
},
{
"epoch": 2.8076923076923075,
"grad_norm": 0.32722559571266174,
"learning_rate": 2.4519167870404125e-05,
"loss": 0.0172,
"step": 73
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.3325071334838867,
"learning_rate": 2.3095882341831372e-05,
"loss": 0.0113,
"step": 74
},
{
"epoch": 2.8846153846153846,
"grad_norm": 0.31768810749053955,
"learning_rate": 2.1702645283470236e-05,
"loss": 0.0119,
"step": 75
},
{
"epoch": 2.9230769230769234,
"grad_norm": 0.29802849888801575,
"learning_rate": 2.0341012763532243e-05,
"loss": 0.0131,
"step": 76
},
{
"epoch": 2.9615384615384617,
"grad_norm": 0.26436880230903625,
"learning_rate": 1.9012505551987765e-05,
"loss": 0.011,
"step": 77
},
{
"epoch": 3.0,
"grad_norm": 0.24293173849582672,
"learning_rate": 1.771860742205988e-05,
"loss": 0.0095,
"step": 78
},
{
"epoch": 3.0,
"eval_loss": 0.011883563362061977,
"eval_runtime": 1.6658,
"eval_samples_per_second": 13.207,
"eval_steps_per_second": 1.201,
"step": 78
}
],
"logging_steps": 1,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.039322719014093e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
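
A minimal sketch (not part of the checkpoint itself) of how one might inspect this state file: it loads the JSON with the standard library and splits "log_history" into training-loss and eval-loss entries. The local path "checkpoint-78/trainer_state.json" is an assumption about where the checkpoint was downloaded.

import json

# Assumed local path to the downloaded checkpoint file.
with open("checkpoint-78/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("logged train steps:", len(train_log))
print("eval points:", len(eval_log))
print("final train loss:", train_log[-1]["loss"])
print("final eval loss:", eval_log[-1]["eval_loss"])

For this file the final entries should report a training loss of 0.0095 at step 78 and an eval loss of about 0.0119 at epoch 3.0.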