{
"best_metric": 0.8720391392707825,
"best_model_checkpoint": "data/Gemma-2-2B_task-2_120-samples_config-1_full/checkpoint-88",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 165,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09090909090909091,
"grad_norm": 0.452947199344635,
"learning_rate": 1.818181818181818e-06,
"loss": 1.2338,
"step": 1
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.61541348695755,
"learning_rate": 3.636363636363636e-06,
"loss": 1.481,
"step": 2
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.44616082310676575,
"learning_rate": 7.272727272727272e-06,
"loss": 1.276,
"step": 4
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.48986655473709106,
"learning_rate": 1.0909090909090909e-05,
"loss": 1.3132,
"step": 6
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.476570725440979,
"learning_rate": 1.4545454545454545e-05,
"loss": 1.3946,
"step": 8
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.4344302713871002,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.2764,
"step": 10
},
{
"epoch": 1.0,
"eval_loss": 1.3023805618286133,
"eval_runtime": 9.4008,
"eval_samples_per_second": 2.553,
"eval_steps_per_second": 2.553,
"step": 11
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.4747586250305176,
"learning_rate": 2.1818181818181818e-05,
"loss": 1.3399,
"step": 12
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.37917423248291016,
"learning_rate": 2.5454545454545454e-05,
"loss": 1.2474,
"step": 14
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.4670597016811371,
"learning_rate": 2.909090909090909e-05,
"loss": 1.2943,
"step": 16
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.39248985052108765,
"learning_rate": 3.272727272727273e-05,
"loss": 1.2822,
"step": 18
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.31525546312332153,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.1788,
"step": 20
},
{
"epoch": 2.0,
"grad_norm": 0.25972259044647217,
"learning_rate": 4e-05,
"loss": 1.0771,
"step": 22
},
{
"epoch": 2.0,
"eval_loss": 1.1453038454055786,
"eval_runtime": 9.1233,
"eval_samples_per_second": 2.631,
"eval_steps_per_second": 2.631,
"step": 22
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.22562259435653687,
"learning_rate": 4.3636363636363636e-05,
"loss": 1.1234,
"step": 24
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.2746324837207794,
"learning_rate": 4.7272727272727275e-05,
"loss": 1.1209,
"step": 26
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.26194456219673157,
"learning_rate": 5.090909090909091e-05,
"loss": 1.051,
"step": 28
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.2589878439903259,
"learning_rate": 5.4545454545454546e-05,
"loss": 1.0704,
"step": 30
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.24663648009300232,
"learning_rate": 5.818181818181818e-05,
"loss": 1.0212,
"step": 32
},
{
"epoch": 3.0,
"eval_loss": 1.0336965322494507,
"eval_runtime": 9.1289,
"eval_samples_per_second": 2.629,
"eval_steps_per_second": 2.629,
"step": 33
},
{
"epoch": 3.090909090909091,
"grad_norm": 0.25568827986717224,
"learning_rate": 6.181818181818182e-05,
"loss": 1.0464,
"step": 34
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.2431456297636032,
"learning_rate": 6.545454545454546e-05,
"loss": 0.9742,
"step": 36
},
{
"epoch": 3.4545454545454546,
"grad_norm": 0.3365744948387146,
"learning_rate": 6.90909090909091e-05,
"loss": 0.9662,
"step": 38
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.2173662930727005,
"learning_rate": 7.272727272727273e-05,
"loss": 0.888,
"step": 40
},
{
"epoch": 3.8181818181818183,
"grad_norm": 0.30230048298835754,
"learning_rate": 7.636363636363637e-05,
"loss": 0.8718,
"step": 42
},
{
"epoch": 4.0,
"grad_norm": 0.1980254054069519,
"learning_rate": 8e-05,
"loss": 0.9453,
"step": 44
},
{
"epoch": 4.0,
"eval_loss": 0.9333186745643616,
"eval_runtime": 9.1213,
"eval_samples_per_second": 2.631,
"eval_steps_per_second": 2.631,
"step": 44
},
{
"epoch": 4.181818181818182,
"grad_norm": 0.20102132856845856,
"learning_rate": 8.363636363636364e-05,
"loss": 0.8602,
"step": 46
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.1937635838985443,
"learning_rate": 8.727272727272727e-05,
"loss": 0.8762,
"step": 48
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.18033844232559204,
"learning_rate": 9.090909090909092e-05,
"loss": 0.8414,
"step": 50
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.23296000063419342,
"learning_rate": 9.454545454545455e-05,
"loss": 0.8604,
"step": 52
},
{
"epoch": 4.909090909090909,
"grad_norm": 0.16760317981243134,
"learning_rate": 9.818181818181818e-05,
"loss": 0.8729,
"step": 54
},
{
"epoch": 5.0,
"eval_loss": 0.8990704417228699,
"eval_runtime": 9.1197,
"eval_samples_per_second": 2.632,
"eval_steps_per_second": 2.632,
"step": 55
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.16476240754127502,
"learning_rate": 9.999899300364532e-05,
"loss": 0.8126,
"step": 56
},
{
"epoch": 5.2727272727272725,
"grad_norm": 0.18971236050128937,
"learning_rate": 9.99909372761763e-05,
"loss": 0.8493,
"step": 58
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.17847785353660583,
"learning_rate": 9.997482711915927e-05,
"loss": 0.7891,
"step": 60
},
{
"epoch": 5.636363636363637,
"grad_norm": 0.1838752031326294,
"learning_rate": 9.99506651282272e-05,
"loss": 0.8154,
"step": 62
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.18833479285240173,
"learning_rate": 9.991845519630678e-05,
"loss": 0.796,
"step": 64
},
{
"epoch": 6.0,
"grad_norm": 0.18303559720516205,
"learning_rate": 9.987820251299122e-05,
"loss": 0.8348,
"step": 66
},
{
"epoch": 6.0,
"eval_loss": 0.8805074691772461,
"eval_runtime": 9.1239,
"eval_samples_per_second": 2.63,
"eval_steps_per_second": 2.63,
"step": 66
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.19952966272830963,
"learning_rate": 9.982991356370404e-05,
"loss": 0.7571,
"step": 68
},
{
"epoch": 6.363636363636363,
"grad_norm": 0.19458475708961487,
"learning_rate": 9.977359612865423e-05,
"loss": 0.8026,
"step": 70
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.1982027292251587,
"learning_rate": 9.970925928158274e-05,
"loss": 0.7319,
"step": 72
},
{
"epoch": 6.7272727272727275,
"grad_norm": 0.21019749343395233,
"learning_rate": 9.963691338830044e-05,
"loss": 0.7757,
"step": 74
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.1921709179878235,
"learning_rate": 9.955657010501806e-05,
"loss": 0.8039,
"step": 76
},
{
"epoch": 7.0,
"eval_loss": 0.8735120892524719,
"eval_runtime": 9.1246,
"eval_samples_per_second": 2.63,
"eval_steps_per_second": 2.63,
"step": 77
},
{
"epoch": 7.090909090909091,
"grad_norm": 0.19705022871494293,
"learning_rate": 9.946824237646824e-05,
"loss": 0.8284,
"step": 78
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.2406872808933258,
"learning_rate": 9.937194443381972e-05,
"loss": 0.7087,
"step": 80
},
{
"epoch": 7.454545454545454,
"grad_norm": 0.25308042764663696,
"learning_rate": 9.926769179238466e-05,
"loss": 0.7085,
"step": 82
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.24140937626361847,
"learning_rate": 9.915550124911866e-05,
"loss": 0.7387,
"step": 84
},
{
"epoch": 7.818181818181818,
"grad_norm": 0.2606413662433624,
"learning_rate": 9.903539087991462e-05,
"loss": 0.7332,
"step": 86
},
{
"epoch": 8.0,
"grad_norm": 0.23610040545463562,
"learning_rate": 9.890738003669029e-05,
"loss": 0.7673,
"step": 88
},
{
"epoch": 8.0,
"eval_loss": 0.8720391392707825,
"eval_runtime": 9.1304,
"eval_samples_per_second": 2.629,
"eval_steps_per_second": 2.629,
"step": 88
},
{
"epoch": 8.181818181818182,
"grad_norm": 0.2393406331539154,
"learning_rate": 9.877148934427037e-05,
"loss": 0.7244,
"step": 90
},
{
"epoch": 8.363636363636363,
"grad_norm": 0.28168681263923645,
"learning_rate": 9.862774069706346e-05,
"loss": 0.6773,
"step": 92
},
{
"epoch": 8.545454545454545,
"grad_norm": 0.2992963492870331,
"learning_rate": 9.847615725553456e-05,
"loss": 0.7183,
"step": 94
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.34102120995521545,
"learning_rate": 9.831676344247342e-05,
"loss": 0.6492,
"step": 96
},
{
"epoch": 8.909090909090908,
"grad_norm": 0.3424055278301239,
"learning_rate": 9.814958493905963e-05,
"loss": 0.7032,
"step": 98
},
{
"epoch": 9.0,
"eval_loss": 0.8838250041007996,
"eval_runtime": 9.1248,
"eval_samples_per_second": 2.63,
"eval_steps_per_second": 2.63,
"step": 99
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.33258163928985596,
"learning_rate": 9.797464868072488e-05,
"loss": 0.7071,
"step": 100
},
{
"epoch": 9.272727272727273,
"grad_norm": 0.3306954503059387,
"learning_rate": 9.779198285281325e-05,
"loss": 0.6222,
"step": 102
},
{
"epoch": 9.454545454545455,
"grad_norm": 0.49372628331184387,
"learning_rate": 9.760161688604008e-05,
"loss": 0.6265,
"step": 104
},
{
"epoch": 9.636363636363637,
"grad_norm": 0.4013664424419403,
"learning_rate": 9.740358145174998e-05,
"loss": 0.6911,
"step": 106
},
{
"epoch": 9.818181818181818,
"grad_norm": 0.40040650963783264,
"learning_rate": 9.719790845697533e-05,
"loss": 0.6452,
"step": 108
},
{
"epoch": 10.0,
"grad_norm": 0.4965708255767822,
"learning_rate": 9.698463103929542e-05,
"loss": 0.6184,
"step": 110
},
{
"epoch": 10.0,
"eval_loss": 0.9229183793067932,
"eval_runtime": 9.1339,
"eval_samples_per_second": 2.628,
"eval_steps_per_second": 2.628,
"step": 110
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.4577091634273529,
"learning_rate": 9.676378356149734e-05,
"loss": 0.5792,
"step": 112
},
{
"epoch": 10.363636363636363,
"grad_norm": 0.5148481130599976,
"learning_rate": 9.653540160603956e-05,
"loss": 0.5688,
"step": 114
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.6189824938774109,
"learning_rate": 9.629952196931901e-05,
"loss": 0.5794,
"step": 116
},
{
"epoch": 10.727272727272727,
"grad_norm": 0.5784375071525574,
"learning_rate": 9.60561826557425e-05,
"loss": 0.5668,
"step": 118
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.5274057984352112,
"learning_rate": 9.580542287160348e-05,
"loss": 0.6169,
"step": 120
},
{
"epoch": 11.0,
"eval_loss": 0.9627707600593567,
"eval_runtime": 9.1334,
"eval_samples_per_second": 2.628,
"eval_steps_per_second": 2.628,
"step": 121
},
{
"epoch": 11.090909090909092,
"grad_norm": 0.6668625473976135,
"learning_rate": 9.554728301876526e-05,
"loss": 0.5574,
"step": 122
},
{
"epoch": 11.272727272727273,
"grad_norm": 0.7610654830932617,
"learning_rate": 9.528180468815155e-05,
"loss": 0.512,
"step": 124
},
{
"epoch": 11.454545454545455,
"grad_norm": 0.6925037503242493,
"learning_rate": 9.50090306530454e-05,
"loss": 0.5262,
"step": 126
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.7486491203308105,
"learning_rate": 9.472900486219769e-05,
"loss": 0.5556,
"step": 128
},
{
"epoch": 11.818181818181818,
"grad_norm": 0.7763406038284302,
"learning_rate": 9.444177243274618e-05,
"loss": 0.5267,
"step": 130
},
{
"epoch": 12.0,
"grad_norm": 0.7108902931213379,
"learning_rate": 9.414737964294636e-05,
"loss": 0.4972,
"step": 132
},
{
"epoch": 12.0,
"eval_loss": 1.0208110809326172,
"eval_runtime": 9.1282,
"eval_samples_per_second": 2.629,
"eval_steps_per_second": 2.629,
"step": 132
},
{
"epoch": 12.181818181818182,
"grad_norm": 0.7493733167648315,
"learning_rate": 9.384587392471515e-05,
"loss": 0.4876,
"step": 134
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.8020772337913513,
"learning_rate": 9.353730385598887e-05,
"loss": 0.4389,
"step": 136
},
{
"epoch": 12.545454545454545,
"grad_norm": 0.7725638151168823,
"learning_rate": 9.322171915289635e-05,
"loss": 0.454,
"step": 138
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.7995460629463196,
"learning_rate": 9.289917066174886e-05,
"loss": 0.4439,
"step": 140
},
{
"epoch": 12.909090909090908,
"grad_norm": 0.8994494080543518,
"learning_rate": 9.256971035084785e-05,
"loss": 0.5106,
"step": 142
},
{
"epoch": 13.0,
"eval_loss": 1.069113850593567,
"eval_runtime": 9.1248,
"eval_samples_per_second": 2.63,
"eval_steps_per_second": 2.63,
"step": 143
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.9180720448493958,
"learning_rate": 9.223339130211192e-05,
"loss": 0.3659,
"step": 144
},
{
"epoch": 13.272727272727273,
"grad_norm": 1.7244093418121338,
"learning_rate": 9.189026770252436e-05,
"loss": 0.4182,
"step": 146
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.8965930342674255,
"learning_rate": 9.154039483540273e-05,
"loss": 0.4605,
"step": 148
},
{
"epoch": 13.636363636363637,
"grad_norm": 0.8466988205909729,
"learning_rate": 9.118382907149165e-05,
"loss": 0.4053,
"step": 150
},
{
"epoch": 13.818181818181818,
"grad_norm": 1.2914624214172363,
"learning_rate": 9.082062785988049e-05,
"loss": 0.3835,
"step": 152
},
{
"epoch": 14.0,
"grad_norm": 1.0132311582565308,
"learning_rate": 9.045084971874738e-05,
"loss": 0.3363,
"step": 154
},
{
"epoch": 14.0,
"eval_loss": 1.1943873167037964,
"eval_runtime": 9.1313,
"eval_samples_per_second": 2.628,
"eval_steps_per_second": 2.628,
"step": 154
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.9961689114570618,
"learning_rate": 9.007455422593077e-05,
"loss": 0.3357,
"step": 156
},
{
"epoch": 14.363636363636363,
"grad_norm": 1.5857237577438354,
"learning_rate": 8.969180200933047e-05,
"loss": 0.3513,
"step": 158
},
{
"epoch": 14.545454545454545,
"grad_norm": 1.0027860403060913,
"learning_rate": 8.930265473713938e-05,
"loss": 0.4078,
"step": 160
},
{
"epoch": 14.727272727272727,
"grad_norm": 1.1034413576126099,
"learning_rate": 8.890717510790763e-05,
"loss": 0.3256,
"step": 162
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.9977664351463318,
"learning_rate": 8.850542684044078e-05,
"loss": 0.2612,
"step": 164
},
{
"epoch": 15.0,
"eval_loss": 1.2479809522628784,
"eval_runtime": 9.1352,
"eval_samples_per_second": 2.627,
"eval_steps_per_second": 2.627,
"step": 165
},
{
"epoch": 15.0,
"step": 165,
"total_flos": 4.301791910114099e+16,
"train_loss": 0.7604246639844143,
"train_runtime": 1840.0416,
"train_samples_per_second": 2.391,
"train_steps_per_second": 0.299
}
],
"logging_steps": 2,
"max_steps": 550,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.301791910114099e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}