{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 0.857689380645752,
"learning_rate": 6.666666666666667e-05,
"loss": 0.6957,
"step": 1
},
{
"epoch": 0.04,
"grad_norm": 0.8586467504501343,
"learning_rate": 0.00013333333333333334,
"loss": 0.7235,
"step": 2
},
{
"epoch": 0.06,
"grad_norm": 0.4279438853263855,
"learning_rate": 0.0002,
"loss": 0.665,
"step": 3
},
{
"epoch": 0.08,
"grad_norm": 0.4085511863231659,
"learning_rate": 0.00019977668786231534,
"loss": 0.625,
"step": 4
},
{
"epoch": 0.1,
"grad_norm": 0.5382882356643677,
"learning_rate": 0.000199107748815478,
"loss": 0.6429,
"step": 5
},
{
"epoch": 0.12,
"grad_norm": 0.4320605993270874,
"learning_rate": 0.0001979961705036587,
"loss": 0.6354,
"step": 6
},
{
"epoch": 0.14,
"grad_norm": 0.3691844940185547,
"learning_rate": 0.00019644691750543767,
"loss": 0.6049,
"step": 7
},
{
"epoch": 0.16,
"grad_norm": 0.33010587096214294,
"learning_rate": 0.0001944669091607919,
"loss": 0.626,
"step": 8
},
{
"epoch": 0.18,
"grad_norm": 0.3566480576992035,
"learning_rate": 0.00019206498866764288,
"loss": 0.6424,
"step": 9
},
{
"epoch": 0.2,
"grad_norm": 0.3397829532623291,
"learning_rate": 0.00018925188358598813,
"loss": 0.6424,
"step": 10
},
{
"epoch": 0.22,
"grad_norm": 0.32853958010673523,
"learning_rate": 0.00018604015792601396,
"loss": 0.5838,
"step": 11
},
{
"epoch": 0.24,
"grad_norm": 0.32366353273391724,
"learning_rate": 0.00018244415603417603,
"loss": 0.6135,
"step": 12
},
{
"epoch": 0.26,
"grad_norm": 0.30376800894737244,
"learning_rate": 0.0001784799385278661,
"loss": 0.6362,
"step": 13
},
{
"epoch": 0.28,
"grad_norm": 0.28993257880210876,
"learning_rate": 0.00017416521056479577,
"loss": 0.5976,
"step": 14
},
{
"epoch": 0.3,
"grad_norm": 0.2954501807689667,
"learning_rate": 0.00016951924276746425,
"loss": 0.6179,
"step": 15
},
{
"epoch": 0.32,
"grad_norm": 0.2875906825065613,
"learning_rate": 0.00016456278515588024,
"loss": 0.6087,
"step": 16
},
{
"epoch": 0.34,
"grad_norm": 0.2935881018638611,
"learning_rate": 0.00015931797447293552,
"loss": 0.5936,
"step": 17
},
{
"epoch": 0.36,
"grad_norm": 0.2834060490131378,
"learning_rate": 0.00015380823531633729,
"loss": 0.601,
"step": 18
},
{
"epoch": 0.38,
"grad_norm": 0.2658578157424927,
"learning_rate": 0.00014805817551866838,
"loss": 0.5869,
"step": 19
},
{
"epoch": 0.4,
"grad_norm": 0.2771172523498535,
"learning_rate": 0.0001420934762428335,
"loss": 0.5686,
"step": 20
},
{
"epoch": 0.42,
"grad_norm": 0.27595141530036926,
"learning_rate": 0.00013594077728375128,
"loss": 0.6035,
"step": 21
},
{
"epoch": 0.44,
"grad_norm": 0.29558807611465454,
"learning_rate": 0.00012962755808856342,
"loss": 0.6189,
"step": 22
},
{
"epoch": 0.46,
"grad_norm": 0.29303988814353943,
"learning_rate": 0.00012318201502675285,
"loss": 0.6322,
"step": 23
},
{
"epoch": 0.48,
"grad_norm": 0.2829428017139435,
"learning_rate": 0.00011663293545831302,
"loss": 0.6079,
"step": 24
},
{
"epoch": 0.5,
"grad_norm": 0.28396308422088623,
"learning_rate": 0.00011000956916240985,
"loss": 0.5784,
"step": 25
},
{
"epoch": 0.52,
"grad_norm": 0.2766873836517334,
"learning_rate": 0.00010334149770076747,
"loss": 0.5971,
"step": 26
},
{
"epoch": 0.54,
"grad_norm": 0.270207017660141,
"learning_rate": 9.665850229923258e-05,
"loss": 0.6075,
"step": 27
},
{
"epoch": 0.56,
"grad_norm": 0.2784218192100525,
"learning_rate": 8.999043083759017e-05,
"loss": 0.5554,
"step": 28
},
{
"epoch": 0.58,
"grad_norm": 0.2829810678958893,
"learning_rate": 8.336706454168701e-05,
"loss": 0.5924,
"step": 29
},
{
"epoch": 0.6,
"grad_norm": 0.2684907913208008,
"learning_rate": 7.681798497324716e-05,
"loss": 0.5428,
"step": 30
},
{
"epoch": 0.62,
"grad_norm": 0.25939464569091797,
"learning_rate": 7.037244191143661e-05,
"loss": 0.5397,
"step": 31
},
{
"epoch": 0.64,
"grad_norm": 0.276192843914032,
"learning_rate": 6.405922271624874e-05,
"loss": 0.5992,
"step": 32
},
{
"epoch": 0.66,
"grad_norm": 0.2892877757549286,
"learning_rate": 5.790652375716652e-05,
"loss": 0.5993,
"step": 33
},
{
"epoch": 0.68,
"grad_norm": 0.25708481669425964,
"learning_rate": 5.1941824481331626e-05,
"loss": 0.5647,
"step": 34
},
{
"epoch": 0.7,
"grad_norm": 0.2700226306915283,
"learning_rate": 4.6191764683662744e-05,
"loss": 0.5424,
"step": 35
},
{
"epoch": 0.72,
"grad_norm": 0.2804276943206787,
"learning_rate": 4.0682025527064486e-05,
"loss": 0.5685,
"step": 36
},
{
"epoch": 0.74,
"grad_norm": 0.2692375183105469,
"learning_rate": 3.543721484411976e-05,
"loss": 0.5512,
"step": 37
},
{
"epoch": 0.76,
"grad_norm": 0.2597506046295166,
"learning_rate": 3.0480757232535772e-05,
"loss": 0.5535,
"step": 38
},
{
"epoch": 0.78,
"grad_norm": 0.2588387727737427,
"learning_rate": 2.5834789435204243e-05,
"loss": 0.5755,
"step": 39
},
{
"epoch": 0.8,
"grad_norm": 0.26815125346183777,
"learning_rate": 2.1520061472133902e-05,
"loss": 0.5859,
"step": 40
},
{
"epoch": 0.82,
"grad_norm": 0.2645345628261566,
"learning_rate": 1.7555843965823992e-05,
"loss": 0.5528,
"step": 41
},
{
"epoch": 0.84,
"grad_norm": 0.25494199991226196,
"learning_rate": 1.3959842073986085e-05,
"loss": 0.5274,
"step": 42
},
{
"epoch": 0.86,
"grad_norm": 0.26076003909111023,
"learning_rate": 1.0748116414011888e-05,
"loss": 0.5554,
"step": 43
},
{
"epoch": 0.88,
"grad_norm": 0.25466349720954895,
"learning_rate": 7.935011332357112e-06,
"loss": 0.5516,
"step": 44
},
{
"epoch": 0.9,
"grad_norm": 0.252687007188797,
"learning_rate": 5.533090839208133e-06,
"loss": 0.562,
"step": 45
},
{
"epoch": 0.92,
"grad_norm": 0.26723793148994446,
"learning_rate": 3.5530824945623542e-06,
"loss": 0.5599,
"step": 46
},
{
"epoch": 0.94,
"grad_norm": 0.27464812994003296,
"learning_rate": 2.003829496341325e-06,
"loss": 0.5998,
"step": 47
},
{
"epoch": 0.96,
"grad_norm": 0.26102861762046814,
"learning_rate": 8.922511845219971e-07,
"loss": 0.5513,
"step": 48
},
{
"epoch": 0.98,
"grad_norm": 0.25358739495277405,
"learning_rate": 2.2331213768468363e-07,
"loss": 0.5438,
"step": 49
},
{
"epoch": 1.0,
"grad_norm": 0.2604013681411743,
"learning_rate": 0.0,
"loss": 0.5642,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.058333143205478e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}