miqu-limarp-70b / checkpoint-59 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0172413793103448,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.5e-05,
"loss": 1.8823,
"step": 1
},
{
"epoch": 0.03,
"learning_rate": 5e-05,
"loss": 1.8925,
"step": 2
},
{
"epoch": 0.05,
"learning_rate": 7.5e-05,
"loss": 1.8355,
"step": 3
},
{
"epoch": 0.07,
"learning_rate": 0.0001,
"loss": 1.8682,
"step": 4
},
{
"epoch": 0.09,
"learning_rate": 0.000125,
"loss": 1.6578,
"step": 5
},
{
"epoch": 0.1,
"learning_rate": 0.00015,
"loss": 1.7746,
"step": 6
},
{
"epoch": 0.12,
"learning_rate": 0.000175,
"loss": 1.7078,
"step": 7
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 2.3556,
"step": 8
},
{
"epoch": 0.16,
"learning_rate": 0.00022500000000000002,
"loss": 1.785,
"step": 9
},
{
"epoch": 0.17,
"learning_rate": 0.00025,
"loss": 1.7491,
"step": 10
},
{
"epoch": 0.19,
"learning_rate": 0.0002499874839708436,
"loss": 1.7576,
"step": 11
},
{
"epoch": 0.21,
"learning_rate": 0.0002499499383897902,
"loss": 1.7944,
"step": 12
},
{
"epoch": 0.22,
"learning_rate": 0.0002498873707755852,
"loss": 1.8368,
"step": 13
},
{
"epoch": 0.24,
"learning_rate": 0.0002497997936577979,
"loss": 1.7509,
"step": 14
},
{
"epoch": 0.26,
"learning_rate": 0.0002496872245743125,
"loss": 1.9833,
"step": 15
},
{
"epoch": 0.28,
"learning_rate": 0.0002495496860678158,
"loss": 1.7853,
"step": 16
},
{
"epoch": 0.29,
"learning_rate": 0.00024938720568128326,
"loss": 1.7627,
"step": 17
},
{
"epoch": 0.31,
"learning_rate": 0.0002491998159524629,
"loss": 1.7721,
"step": 18
},
{
"epoch": 0.33,
"learning_rate": 0.0002489875544073596,
"loss": 1.6278,
"step": 19
},
{
"epoch": 0.34,
"learning_rate": 0.00024875046355272046,
"loss": 1.6963,
"step": 20
},
{
"epoch": 0.36,
"learning_rate": 0.0002484885908675223,
"loss": 1.7123,
"step": 21
},
{
"epoch": 0.38,
"learning_rate": 0.0002482019887934636,
"loss": 1.7719,
"step": 22
},
{
"epoch": 0.4,
"learning_rate": 0.00024789071472446306,
"loss": 1.8138,
"step": 23
},
{
"epoch": 0.41,
"learning_rate": 0.00024755483099516584,
"loss": 1.8235,
"step": 24
},
{
"epoch": 0.43,
"learning_rate": 0.0002471944048684608,
"loss": 1.7845,
"step": 25
},
{
"epoch": 0.45,
"learning_rate": 0.0002468095085220104,
"loss": 1.6665,
"step": 26
},
{
"epoch": 0.47,
"learning_rate": 0.00024640021903379704,
"loss": 1.6388,
"step": 27
},
{
"epoch": 0.48,
"learning_rate": 0.00024596661836668736,
"loss": 1.6817,
"step": 28
},
{
"epoch": 0.5,
"learning_rate": 0.0002455087933520188,
"loss": 1.7617,
"step": 29
},
{
"epoch": 0.52,
"learning_rate": 0.0002450268356722112,
"loss": 1.6921,
"step": 30
},
{
"epoch": 0.53,
"learning_rate": 0.00024452084184240636,
"loss": 1.6591,
"step": 31
},
{
"epoch": 0.55,
"learning_rate": 0.00024399091319114082,
"loss": 1.8172,
"step": 32
},
{
"epoch": 0.57,
"learning_rate": 0.00024343715584005372,
"loss": 1.7321,
"step": 33
},
{
"epoch": 0.59,
"learning_rate": 0.00024285968068263553,
"loss": 1.7617,
"step": 34
},
{
"epoch": 0.6,
"learning_rate": 0.00024225860336202074,
"loss": 1.7899,
"step": 35
},
{
"epoch": 0.62,
"learning_rate": 0.00024163404424782967,
"loss": 1.7486,
"step": 36
},
{
"epoch": 0.64,
"learning_rate": 0.0002409861284120637,
"loss": 1.7422,
"step": 37
},
{
"epoch": 0.66,
"learning_rate": 0.0002403149856040586,
"loss": 1.6344,
"step": 38
},
{
"epoch": 0.67,
"learning_rate": 0.0002396207502245017,
"loss": 1.7174,
"step": 39
},
{
"epoch": 0.69,
"learning_rate": 0.00023890356129851697,
"loss": 1.7665,
"step": 40
},
{
"epoch": 0.71,
"learning_rate": 0.00023816356244782462,
"loss": 1.7379,
"step": 41
},
{
"epoch": 0.72,
"learning_rate": 0.0002374009018619796,
"loss": 1.7206,
"step": 42
},
{
"epoch": 0.74,
"learning_rate": 0.00023661573226869606,
"loss": 1.6732,
"step": 43
},
{
"epoch": 0.76,
"learning_rate": 0.00023580821090326233,
"loss": 1.7475,
"step": 44
},
{
"epoch": 0.78,
"learning_rate": 0.00023497849947705368,
"loss": 1.7546,
"step": 45
},
{
"epoch": 0.79,
"learning_rate": 0.00023412676414514853,
"loss": 1.6588,
"step": 46
},
{
"epoch": 0.81,
"learning_rate": 0.00023325317547305487,
"loss": 1.7417,
"step": 47
},
{
"epoch": 0.83,
"learning_rate": 0.00023235790840255328,
"loss": 1.7224,
"step": 48
},
{
"epoch": 0.84,
"learning_rate": 0.0002314411422166639,
"loss": 1.6476,
"step": 49
},
{
"epoch": 0.86,
"learning_rate": 0.00023050306050374382,
"loss": 1.73,
"step": 50
},
{
"epoch": 0.88,
"learning_rate": 0.00022954385112072202,
"loss": 1.6658,
"step": 51
},
{
"epoch": 0.9,
"learning_rate": 0.00022856370615548027,
"loss": 1.7046,
"step": 52
},
{
"epoch": 0.91,
"learning_rate": 0.00022756282188838596,
"loss": 1.7037,
"step": 53
},
{
"epoch": 0.93,
"learning_rate": 0.00022654139875298573,
"loss": 1.7166,
"step": 54
},
{
"epoch": 0.95,
"learning_rate": 0.00022549964129586758,
"loss": 1.7816,
"step": 55
},
{
"epoch": 0.97,
"learning_rate": 0.00022443775813569874,
"loss": 1.7556,
"step": 56
},
{
"epoch": 0.98,
"learning_rate": 0.00022335596192144875,
"loss": 1.7044,
"step": 57
},
{
"epoch": 1.0,
"learning_rate": 0.00022225446928980495,
"loss": 1.7183,
"step": 58
},
{
"epoch": 1.02,
"learning_rate": 0.0002211335008217896,
"loss": 1.738,
"step": 59
}
],
"logging_steps": 1,
"max_steps": 232,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"total_flos": 5.264395754633429e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
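
The JSON above follows the Hugging Face Trainer's trainer_state.json layout: log_history holds one entry per logged step (logging_steps is 1 here) with epoch, learning_rate, loss, and step, while the top-level fields track overall progress (global_step 59 of max_steps 232, just past one of num_train_epochs 4). Below is a minimal Python sketch for loading and inspecting this file; the path is an assumption and should point at wherever the checkpoint directory sits locally.

import json

# Assumed local path to the checkpoint's trainer state (adjust as needed).
STATE_PATH = "checkpoint-59/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Overall training progress recorded in the top-level fields.
print(f"epoch {state['epoch']:.3f} of {state['num_train_epochs']}, "
      f"step {state['global_step']} of {state['max_steps']}")

# One log entry per optimizer step (logging_steps is 1 in this run).
for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.3e}  "
          f"loss {entry['loss']:.4f}")

# Smoothed view of the loss: trailing average over the last few steps.
losses = [e["loss"] for e in state["log_history"]]
window = 8
tail = losses[-window:]
print(f"average loss over final {len(tail)} logged steps: "
      f"{sum(tail) / len(tail):.4f}")

Reading the learning_rate column in the printed output makes the schedule visible: a linear warmup to 2.5e-4 over the first ten steps, followed by a gradual decay for the remainder of the logged steps.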