{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 50,
"global_step": 2073,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0964785335262904,
"grad_norm": 0.736626923084259,
"learning_rate": 2.3166023166023166e-06,
"loss": 0.8511,
"step": 50
},
{
"epoch": 0.1929570670525808,
"grad_norm": 1.6079213619232178,
"learning_rate": 4.72972972972973e-06,
"loss": 0.6012,
"step": 100
},
{
"epoch": 0.2894356005788712,
"grad_norm": 0.4046380817890167,
"learning_rate": 7.142857142857143e-06,
"loss": 0.3433,
"step": 150
},
{
"epoch": 0.3859141341051616,
"grad_norm": 0.5595836043357849,
"learning_rate": 9.507722007722009e-06,
"loss": 0.3209,
"step": 200
},
{
"epoch": 0.482392667631452,
"grad_norm": 1.181523323059082,
"learning_rate": 1.1920849420849421e-05,
"loss": 0.3012,
"step": 250
},
{
"epoch": 0.5788712011577424,
"grad_norm": 0.8507217764854431,
"learning_rate": 1.4333976833976834e-05,
"loss": 0.2572,
"step": 300
},
{
"epoch": 0.6753497346840328,
"grad_norm": 0.6581934690475464,
"learning_rate": 1.674710424710425e-05,
"loss": 0.2677,
"step": 350
},
{
"epoch": 0.7718282682103232,
"grad_norm": 0.4028921127319336,
"learning_rate": 1.916023166023166e-05,
"loss": 0.2891,
"step": 400
},
{
"epoch": 0.8683068017366136,
"grad_norm": 0.6789234280586243,
"learning_rate": 2.1573359073359074e-05,
"loss": 0.2928,
"step": 450
},
{
"epoch": 0.964785335262904,
"grad_norm": 0.6921871304512024,
"learning_rate": 2.398648648648649e-05,
"loss": 0.262,
"step": 500
},
{
"epoch": 0.9995176073323685,
"eval_loss": 0.3929877281188965,
"eval_runtime": 195.0178,
"eval_samples_per_second": 1.185,
"eval_steps_per_second": 0.149,
"step": 518
},
{
"epoch": 1.0612638687891944,
"grad_norm": 0.5859283208847046,
"learning_rate": 2.6399613899613903e-05,
"loss": 0.2804,
"step": 550
},
{
"epoch": 1.1577424023154848,
"grad_norm": 0.7389869689941406,
"learning_rate": 2.8812741312741313e-05,
"loss": 0.2208,
"step": 600
},
{
"epoch": 1.2542209358417753,
"grad_norm": 0.4902403950691223,
"learning_rate": 3.122586872586873e-05,
"loss": 0.2518,
"step": 650
},
{
"epoch": 1.3506994693680656,
"grad_norm": 0.6064320802688599,
"learning_rate": 3.3638996138996145e-05,
"loss": 0.2591,
"step": 700
},
{
"epoch": 1.447178002894356,
"grad_norm": 0.44731563329696655,
"learning_rate": 3.605212355212355e-05,
"loss": 0.2585,
"step": 750
},
{
"epoch": 1.5436565364206465,
"grad_norm": 0.4036654531955719,
"learning_rate": 3.8465250965250966e-05,
"loss": 0.2606,
"step": 800
},
{
"epoch": 1.6401350699469368,
"grad_norm": 0.46271753311157227,
"learning_rate": 4.087837837837838e-05,
"loss": 0.2204,
"step": 850
},
{
"epoch": 1.7366136034732271,
"grad_norm": 0.6997343897819519,
"learning_rate": 4.3291505791505795e-05,
"loss": 0.2147,
"step": 900
},
{
"epoch": 1.8330921369995177,
"grad_norm": 0.5480188131332397,
"learning_rate": 4.5704633204633205e-05,
"loss": 0.2408,
"step": 950
},
{
"epoch": 1.929570670525808,
"grad_norm": 0.6384648680686951,
"learning_rate": 4.811776061776062e-05,
"loss": 0.249,
"step": 1000
},
{
"epoch": 1.9990352146647372,
"eval_loss": 0.3673768937587738,
"eval_runtime": 193.186,
"eval_samples_per_second": 1.196,
"eval_steps_per_second": 0.15,
"step": 1036
},
{
"epoch": 2.0260492040520983,
"grad_norm": 0.6000218987464905,
"learning_rate": 4.9867277992277996e-05,
"loss": 0.2716,
"step": 1050
},
{
"epoch": 2.122527737578389,
"grad_norm": 0.42156481742858887,
"learning_rate": 4.9263996138996145e-05,
"loss": 0.242,
"step": 1100
},
{
"epoch": 2.2190062711046794,
"grad_norm": 0.38901010155677795,
"learning_rate": 4.866071428571429e-05,
"loss": 0.2342,
"step": 1150
},
{
"epoch": 2.3154848046309695,
"grad_norm": 0.3799930214881897,
"learning_rate": 4.8057432432432437e-05,
"loss": 0.2021,
"step": 1200
},
{
"epoch": 2.41196333815726,
"grad_norm": 0.5112248659133911,
"learning_rate": 4.745415057915058e-05,
"loss": 0.2327,
"step": 1250
},
{
"epoch": 2.5084418716835506,
"grad_norm": 0.5403085947036743,
"learning_rate": 4.685086872586873e-05,
"loss": 0.2014,
"step": 1300
},
{
"epoch": 2.6049204052098407,
"grad_norm": 0.5263782739639282,
"learning_rate": 4.624758687258687e-05,
"loss": 0.2302,
"step": 1350
},
{
"epoch": 2.7013989387361312,
"grad_norm": 0.5885067582130432,
"learning_rate": 4.5644305019305026e-05,
"loss": 0.1972,
"step": 1400
},
{
"epoch": 2.797877472262422,
"grad_norm": 0.46716436743736267,
"learning_rate": 4.504102316602317e-05,
"loss": 0.2033,
"step": 1450
},
{
"epoch": 2.894356005788712,
"grad_norm": 0.5852177143096924,
"learning_rate": 4.443774131274132e-05,
"loss": 0.2248,
"step": 1500
},
{
"epoch": 2.9908345393150024,
"grad_norm": 0.343026727437973,
"learning_rate": 4.383445945945946e-05,
"loss": 0.2224,
"step": 1550
},
{
"epoch": 2.9985528219971056,
"eval_loss": 0.3541047275066376,
"eval_runtime": 193.2254,
"eval_samples_per_second": 1.195,
"eval_steps_per_second": 0.15,
"step": 1554
},
{
"epoch": 3.087313072841293,
"grad_norm": 0.3919488489627838,
"learning_rate": 4.323117760617761e-05,
"loss": 0.1887,
"step": 1600
},
{
"epoch": 3.183791606367583,
"grad_norm": 0.4615103006362915,
"learning_rate": 4.262789575289575e-05,
"loss": 0.1769,
"step": 1650
},
{
"epoch": 3.2802701398938736,
"grad_norm": 0.2957373857498169,
"learning_rate": 4.20246138996139e-05,
"loss": 0.1953,
"step": 1700
},
{
"epoch": 3.376748673420164,
"grad_norm": 0.35463210940361023,
"learning_rate": 4.142133204633205e-05,
"loss": 0.2113,
"step": 1750
},
{
"epoch": 3.4732272069464543,
"grad_norm": 0.39204856753349304,
"learning_rate": 4.08180501930502e-05,
"loss": 0.1951,
"step": 1800
},
{
"epoch": 3.569705740472745,
"grad_norm": 0.371441125869751,
"learning_rate": 4.0214768339768347e-05,
"loss": 0.2015,
"step": 1850
},
{
"epoch": 3.6661842739990353,
"grad_norm": 1.6117830276489258,
"learning_rate": 3.961148648648649e-05,
"loss": 0.2371,
"step": 1900
},
{
"epoch": 3.7626628075253254,
"grad_norm": 0.3714867830276489,
"learning_rate": 3.900820463320464e-05,
"loss": 0.2153,
"step": 1950
},
{
"epoch": 3.859141341051616,
"grad_norm": 0.6051406264305115,
"learning_rate": 3.840492277992278e-05,
"loss": 0.2008,
"step": 2000
},
{
"epoch": 3.9556198745779065,
"grad_norm": 0.4763951599597931,
"learning_rate": 3.780164092664093e-05,
"loss": 0.1895,
"step": 2050
},
{
"epoch": 4.0,
"eval_loss": 0.35206398367881775,
"eval_runtime": 193.2101,
"eval_samples_per_second": 1.196,
"eval_steps_per_second": 0.15,
"step": 2073
}
],
"logging_steps": 50,
"max_steps": 5180,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 8.938228090829537e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}