reprover_err_random/trainer_state.json
{
"best_metric": 1.20897376537323,
"best_model_checkpoint": "model_training/reprover_err/checkpoints-random-09-08-18-00/checkpoint-250",
"epoch": 7.302231237322515,
"eval_steps": 25,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08113590263691683,
"grad_norm": 23.601030349731445,
"learning_rate": 5.102040816326531e-05,
"loss": 12.5644,
"step": 5
},
{
"epoch": 0.16227180527383367,
"grad_norm": 10.2500638961792,
"learning_rate": 0.00010204081632653062,
"loss": 7.6824,
"step": 10
},
{
"epoch": 0.2434077079107505,
"grad_norm": 0.5588569641113281,
"learning_rate": 0.00015306122448979594,
"loss": 4.0204,
"step": 15
},
{
"epoch": 0.32454361054766734,
"grad_norm": 0.23660239577293396,
"learning_rate": 0.00020408163265306123,
"loss": 3.6085,
"step": 20
},
{
"epoch": 0.4056795131845842,
"grad_norm": 0.249436616897583,
"learning_rate": 0.00025510204081632655,
"loss": 3.5044,
"step": 25
},
{
"epoch": 0.4056795131845842,
"eval_loss": 3.385061025619507,
"eval_runtime": 98.6159,
"eval_samples_per_second": 10.14,
"eval_steps_per_second": 0.639,
"step": 25
},
{
"epoch": 0.486815415821501,
"grad_norm": 1.356109857559204,
"learning_rate": 0.0003061224489795919,
"loss": 3.4346,
"step": 30
},
{
"epoch": 0.5679513184584178,
"grad_norm": 1.5906275510787964,
"learning_rate": 0.00035714285714285714,
"loss": 2.5708,
"step": 35
},
{
"epoch": 0.6490872210953347,
"grad_norm": 0.7469751834869385,
"learning_rate": 0.00040816326530612246,
"loss": 1.9541,
"step": 40
},
{
"epoch": 0.7302231237322515,
"grad_norm": 0.46847933530807495,
"learning_rate": 0.0004591836734693878,
"loss": 1.7316,
"step": 45
},
{
"epoch": 0.8113590263691683,
"grad_norm": 0.7208541035652161,
"learning_rate": 0.0004999935985425297,
"loss": 1.6138,
"step": 50
},
{
"epoch": 0.8113590263691683,
"eval_loss": 1.387947678565979,
"eval_runtime": 98.6024,
"eval_samples_per_second": 10.142,
"eval_steps_per_second": 0.639,
"step": 50
},
{
"epoch": 0.8924949290060852,
"grad_norm": 0.31425440311431885,
"learning_rate": 0.0004997695819512612,
"loss": 1.5905,
"step": 55
},
{
"epoch": 0.973630831643002,
"grad_norm": 0.4940365254878998,
"learning_rate": 0.0004992258202402822,
"loss": 1.5013,
"step": 60
},
{
"epoch": 1.054766734279919,
"grad_norm": 0.3743211328983307,
"learning_rate": 0.0004983630095117843,
"loss": 1.4282,
"step": 65
},
{
"epoch": 1.1359026369168357,
"grad_norm": 0.46512871980667114,
"learning_rate": 0.0004971822543018662,
"loss": 1.386,
"step": 70
},
{
"epoch": 1.2170385395537526,
"grad_norm": 0.2764676511287689,
"learning_rate": 0.0004956850661665511,
"loss": 1.4375,
"step": 75
},
{
"epoch": 1.2170385395537526,
"eval_loss": 1.2822275161743164,
"eval_runtime": 98.6288,
"eval_samples_per_second": 10.139,
"eval_steps_per_second": 0.639,
"step": 75
},
{
"epoch": 1.2981744421906694,
"grad_norm": 0.2621470093727112,
"learning_rate": 0.0004938733617467517,
"loss": 1.4209,
"step": 80
},
{
"epoch": 1.3793103448275863,
"grad_norm": 0.14090226590633392,
"learning_rate": 0.0004917494603146632,
"loss": 1.3766,
"step": 85
},
{
"epoch": 1.460446247464503,
"grad_norm": 0.11969256401062012,
"learning_rate": 0.0004893160808047222,
"loss": 1.3755,
"step": 90
},
{
"epoch": 1.5415821501014197,
"grad_norm": 0.18346261978149414,
"learning_rate": 0.00048657633833293557,
"loss": 1.4168,
"step": 95
},
{
"epoch": 1.6227180527383367,
"grad_norm": 0.3624500036239624,
"learning_rate": 0.0004835337402090316,
"loss": 1.3587,
"step": 100
},
{
"epoch": 1.6227180527383367,
"eval_loss": 1.2857593297958374,
"eval_runtime": 98.6659,
"eval_samples_per_second": 10.135,
"eval_steps_per_second": 0.639,
"step": 100
},
{
"epoch": 1.7038539553752536,
"grad_norm": 0.5834723114967346,
"learning_rate": 0.0004801921814465414,
"loss": 1.3857,
"step": 105
},
{
"epoch": 1.7849898580121704,
"grad_norm": 0.37480711936950684,
"learning_rate": 0.00047655593977655674,
"loss": 1.3745,
"step": 110
},
{
"epoch": 1.866125760649087,
"grad_norm": 0.31830307841300964,
"learning_rate": 0.0004726296701715489,
"loss": 1.3517,
"step": 115
},
{
"epoch": 1.947261663286004,
"grad_norm": 0.14451760053634644,
"learning_rate": 0.00046841839888625623,
"loss": 1.3346,
"step": 120
},
{
"epoch": 2.028397565922921,
"grad_norm": 0.22967207431793213,
"learning_rate": 0.0004639275170232734,
"loss": 1.3735,
"step": 125
},
{
"epoch": 2.028397565922921,
"eval_loss": 1.2710317373275757,
"eval_runtime": 98.685,
"eval_samples_per_second": 10.133,
"eval_steps_per_second": 0.638,
"step": 125
},
{
"epoch": 2.109533468559838,
"grad_norm": 0.16856279969215393,
"learning_rate": 0.0004591627736315743,
"loss": 1.3417,
"step": 130
},
{
"epoch": 2.1906693711967544,
"grad_norm": 0.2670596241950989,
"learning_rate": 0.0004541302683468084,
"loss": 1.3135,
"step": 135
},
{
"epoch": 2.2718052738336714,
"grad_norm": 0.8827645778656006,
"learning_rate": 0.0004488364435827881,
"loss": 1.3399,
"step": 140
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.3872511088848114,
"learning_rate": 0.00044328807628416644,
"loss": 1.3888,
"step": 145
},
{
"epoch": 2.4340770791075053,
"grad_norm": 0.5543010234832764,
"learning_rate": 0.0004374922692508611,
"loss": 1.3678,
"step": 150
},
{
"epoch": 2.4340770791075053,
"eval_loss": 1.283129096031189,
"eval_runtime": 98.6825,
"eval_samples_per_second": 10.134,
"eval_steps_per_second": 0.638,
"step": 150
},
{
"epoch": 2.5152129817444218,
"grad_norm": 0.3389835059642792,
"learning_rate": 0.0004314564420453311,
"loss": 1.3614,
"step": 155
},
{
"epoch": 2.5963488843813387,
"grad_norm": 0.38079357147216797,
"learning_rate": 0.0004251883214943475,
"loss": 1.342,
"step": 160
},
{
"epoch": 2.6774847870182557,
"grad_norm": 0.23523402214050293,
"learning_rate": 0.0004186959317974155,
"loss": 1.3378,
"step": 165
},
{
"epoch": 2.7586206896551726,
"grad_norm": 0.18377311527729034,
"learning_rate": 0.00041198758425451266,
"loss": 1.3272,
"step": 170
},
{
"epoch": 2.839756592292089,
"grad_norm": 0.4048319458961487,
"learning_rate": 0.00040507186662629185,
"loss": 1.3431,
"step": 175
},
{
"epoch": 2.839756592292089,
"eval_loss": 1.252098798751831,
"eval_runtime": 98.7407,
"eval_samples_per_second": 10.128,
"eval_steps_per_second": 0.638,
"step": 175
},
{
"epoch": 2.920892494929006,
"grad_norm": 1.1706334352493286,
"learning_rate": 0.0003979576321403705,
"loss": 1.3111,
"step": 180
},
{
"epoch": 3.002028397565923,
"grad_norm": 0.3772040903568268,
"learning_rate": 0.0003906539881577793,
"loss": 1.3515,
"step": 185
},
{
"epoch": 3.08316430020284,
"grad_norm": 0.4847799241542816,
"learning_rate": 0.0003831702845140801,
"loss": 1.3442,
"step": 190
},
{
"epoch": 3.1643002028397564,
"grad_norm": 0.35955315828323364,
"learning_rate": 0.00037551610155007613,
"loss": 1.338,
"step": 195
},
{
"epoch": 3.2454361054766734,
"grad_norm": 0.30124932527542114,
"learning_rate": 0.00036770123784744027,
"loss": 1.3001,
"step": 200
},
{
"epoch": 3.2454361054766734,
"eval_loss": 1.2483381032943726,
"eval_runtime": 98.609,
"eval_samples_per_second": 10.141,
"eval_steps_per_second": 0.639,
"step": 200
},
{
"epoch": 3.3265720081135903,
"grad_norm": 0.4706196188926697,
"learning_rate": 0.00035973569768495855,
"loss": 1.3099,
"step": 205
},
{
"epoch": 3.4077079107505073,
"grad_norm": 0.4052978754043579,
"learning_rate": 0.0003516296782314491,
"loss": 1.2863,
"step": 210
},
{
"epoch": 3.4888438133874238,
"grad_norm": 0.4775325655937195,
"learning_rate": 0.00034339355649175095,
"loss": 1.3306,
"step": 215
},
{
"epoch": 3.5699797160243407,
"grad_norm": 0.2688348591327667,
"learning_rate": 0.00033503787602249364,
"loss": 1.2971,
"step": 220
},
{
"epoch": 3.6511156186612577,
"grad_norm": 0.16574963927268982,
"learning_rate": 0.00032657333343465356,
"loss": 1.2761,
"step": 225
},
{
"epoch": 3.6511156186612577,
"eval_loss": 1.2276986837387085,
"eval_runtime": 98.6417,
"eval_samples_per_second": 10.138,
"eval_steps_per_second": 0.639,
"step": 225
},
{
"epoch": 3.732251521298174,
"grad_norm": 0.16287527978420258,
"learning_rate": 0.0003180107647001769,
"loss": 1.2611,
"step": 230
},
{
"epoch": 3.813387423935091,
"grad_norm": 0.12902116775512695,
"learning_rate": 0.0003093611312801979,
"loss": 1.2933,
"step": 235
},
{
"epoch": 3.894523326572008,
"grad_norm": 0.19913575053215027,
"learning_rate": 0.00030063550609261025,
"loss": 1.2926,
"step": 240
},
{
"epoch": 3.975659229208925,
"grad_norm": 0.39822715520858765,
"learning_rate": 0.000291845059336957,
"loss": 1.3097,
"step": 245
},
{
"epoch": 4.056795131845842,
"grad_norm": 0.5150278806686401,
"learning_rate": 0.0002830010441947834,
"loss": 1.2907,
"step": 250
},
{
"epoch": 4.056795131845842,
"eval_loss": 1.20897376537323,
"eval_runtime": 98.62,
"eval_samples_per_second": 10.14,
"eval_steps_per_second": 0.639,
"step": 250
},
{
"epoch": 4.137931034482759,
"grad_norm": 0.3091956675052643,
"learning_rate": 0.00027411478242376017,
"loss": 1.2904,
"step": 255
},
{
"epoch": 4.219066937119676,
"grad_norm": 0.2656278908252716,
"learning_rate": 0.00026519764986401774,
"loss": 1.2999,
"step": 260
},
{
"epoch": 4.300202839756592,
"grad_norm": 0.1790953278541565,
"learning_rate": 0.000256261061875247,
"loss": 1.2549,
"step": 265
},
{
"epoch": 4.381338742393509,
"grad_norm": 0.15690313279628754,
"learning_rate": 0.0002473164587232079,
"loss": 1.3,
"step": 270
},
{
"epoch": 4.462474645030426,
"grad_norm": 0.12892165780067444,
"learning_rate": 0.0002383752909343547,
"loss": 1.2946,
"step": 275
},
{
"epoch": 4.462474645030426,
"eval_loss": 1.2234361171722412,
"eval_runtime": 98.5999,
"eval_samples_per_second": 10.142,
"eval_steps_per_second": 0.639,
"step": 275
},
{
"epoch": 4.543610547667343,
"grad_norm": 0.136368989944458,
"learning_rate": 0.0002294490046373259,
"loss": 1.2971,
"step": 280
},
{
"epoch": 4.62474645030426,
"grad_norm": 0.07033926248550415,
"learning_rate": 0.00022054902691006405,
"loss": 1.2991,
"step": 285
},
{
"epoch": 4.705882352941177,
"grad_norm": 0.08667398989200592,
"learning_rate": 0.00021168675115132315,
"loss": 1.291,
"step": 290
},
{
"epoch": 4.787018255578094,
"grad_norm": 0.08972382545471191,
"learning_rate": 0.00020287352249529153,
"loss": 1.3007,
"step": 295
},
{
"epoch": 4.8681541582150105,
"grad_norm": 0.1722905933856964,
"learning_rate": 0.00019412062328800044,
"loss": 1.2729,
"step": 300
},
{
"epoch": 4.8681541582150105,
"eval_loss": 1.2213460206985474,
"eval_runtime": 98.5784,
"eval_samples_per_second": 10.144,
"eval_steps_per_second": 0.639,
"step": 300
},
{
"epoch": 4.9492900608519275,
"grad_norm": 0.10548505187034607,
"learning_rate": 0.000185439258644112,
"loss": 1.2854,
"step": 305
},
{
"epoch": 5.0304259634888435,
"grad_norm": 0.08486346155405045,
"learning_rate": 0.00017684054210257517,
"loss": 1.236,
"step": 310
},
{
"epoch": 5.1115618661257605,
"grad_norm": 0.10991238802671432,
"learning_rate": 0.00016833548139951395,
"loss": 1.2712,
"step": 315
},
{
"epoch": 5.192697768762677,
"grad_norm": 0.06149250268936157,
"learning_rate": 0.0001599349643765599,
"loss": 1.2807,
"step": 320
},
{
"epoch": 5.273833671399594,
"grad_norm": 0.17640946805477142,
"learning_rate": 0.0001516497450426686,
"loss": 1.3158,
"step": 325
},
{
"epoch": 5.273833671399594,
"eval_loss": 1.2180273532867432,
"eval_runtime": 98.5761,
"eval_samples_per_second": 10.144,
"eval_steps_per_second": 0.639,
"step": 325
},
{
"epoch": 5.354969574036511,
"grad_norm": 0.46290215849876404,
"learning_rate": 0.00014349042980726362,
"loss": 1.2482,
"step": 330
},
{
"epoch": 5.436105476673428,
"grad_norm": 0.16729195415973663,
"learning_rate": 0.0001354674639023318,
"loss": 1.2903,
"step": 335
},
{
"epoch": 5.517241379310345,
"grad_norm": 0.24489770829677582,
"learning_rate": 0.00012759111801085066,
"loss": 1.2757,
"step": 340
},
{
"epoch": 5.598377281947261,
"grad_norm": 0.23532100021839142,
"learning_rate": 0.00011987147511866788,
"loss": 1.2928,
"step": 345
},
{
"epoch": 5.679513184584178,
"grad_norm": 0.07568836212158203,
"learning_rate": 0.00011231841760666186,
"loss": 1.2722,
"step": 350
},
{
"epoch": 5.679513184584178,
"eval_loss": 1.2159314155578613,
"eval_runtime": 98.5804,
"eval_samples_per_second": 10.144,
"eval_steps_per_second": 0.639,
"step": 350
},
{
"epoch": 5.760649087221095,
"grad_norm": 0.07196550071239471,
"learning_rate": 0.0001049416145997094,
"loss": 1.2969,
"step": 355
},
{
"epoch": 5.841784989858012,
"grad_norm": 0.0853937566280365,
"learning_rate": 9.775050958865584e-05,
"loss": 1.306,
"step": 360
},
{
"epoch": 5.922920892494929,
"grad_norm": 0.05001223087310791,
"learning_rate": 9.075430834113152e-05,
"loss": 1.2304,
"step": 365
},
{
"epoch": 6.004056795131846,
"grad_norm": 0.058833617717027664,
"learning_rate": 8.396196711669335e-05,
"loss": 1.2746,
"step": 370
},
{
"epoch": 6.085192697768763,
"grad_norm": 0.07061880826950073,
"learning_rate": 7.738218120137671e-05,
"loss": 1.2522,
"step": 375
},
{
"epoch": 6.085192697768763,
"eval_loss": 1.2192745208740234,
"eval_runtime": 98.5743,
"eval_samples_per_second": 10.145,
"eval_steps_per_second": 0.639,
"step": 375
},
{
"epoch": 6.16632860040568,
"grad_norm": 0.06949684768915176,
"learning_rate": 7.102337377633394e-05,
"loss": 1.2678,
"step": 380
},
{
"epoch": 6.247464503042596,
"grad_norm": 0.08404209464788437,
"learning_rate": 6.489368513481228e-05,
"loss": 1.2792,
"step": 385
},
{
"epoch": 6.328600405679513,
"grad_norm": 0.08185689151287079,
"learning_rate": 5.9000962261273136e-05,
"loss": 1.2779,
"step": 390
},
{
"epoch": 6.40973630831643,
"grad_norm": 0.04312283918261528,
"learning_rate": 5.3352748785993164e-05,
"loss": 1.3018,
"step": 395
},
{
"epoch": 6.490872210953347,
"grad_norm": 0.07406298071146011,
"learning_rate": 4.795627532800806e-05,
"loss": 1.2636,
"step": 400
},
{
"epoch": 6.490872210953347,
"eval_loss": 1.2143380641937256,
"eval_runtime": 98.6065,
"eval_samples_per_second": 10.141,
"eval_steps_per_second": 0.639,
"step": 400
},
{
"epoch": 6.572008113590264,
"grad_norm": 0.043406181037425995,
"learning_rate": 4.281845023876074e-05,
"loss": 1.2578,
"step": 405
},
{
"epoch": 6.653144016227181,
"grad_norm": 0.07357024401426315,
"learning_rate": 3.794585075830329e-05,
"loss": 1.2472,
"step": 410
},
{
"epoch": 6.734279918864098,
"grad_norm": 0.05774565041065216,
"learning_rate": 3.334471459537497e-05,
"loss": 1.3082,
"step": 415
},
{
"epoch": 6.8154158215010145,
"grad_norm": 0.054679855704307556,
"learning_rate": 2.902093194213526e-05,
"loss": 1.2782,
"step": 420
},
{
"epoch": 6.896551724137931,
"grad_norm": 0.050430841743946075,
"learning_rate": 2.4980037933772488e-05,
"loss": 1.3043,
"step": 425
},
{
"epoch": 6.896551724137931,
"eval_loss": 1.2151918411254883,
"eval_runtime": 98.562,
"eval_samples_per_second": 10.146,
"eval_steps_per_second": 0.639,
"step": 425
},
{
"epoch": 6.9776876267748476,
"grad_norm": 0.041136015206575394,
"learning_rate": 2.122720556264357e-05,
"loss": 1.2673,
"step": 430
},
{
"epoch": 7.0588235294117645,
"grad_norm": 0.03708193823695183,
"learning_rate": 1.776723905601438e-05,
"loss": 1.2496,
"step": 435
},
{
"epoch": 7.139959432048681,
"grad_norm": 0.055347807705402374,
"learning_rate": 1.4604567725877926e-05,
"loss": 1.2835,
"step": 440
},
{
"epoch": 7.221095334685598,
"grad_norm": 0.040714431554079056,
"learning_rate": 1.1743240298725116e-05,
"loss": 1.3163,
"step": 445
},
{
"epoch": 7.302231237322515,
"grad_norm": 0.03776278346776962,
"learning_rate": 9.18691973252539e-06,
"loss": 1.2647,
"step": 450
},
{
"epoch": 7.302231237322515,
"eval_loss": 1.2177557945251465,
"eval_runtime": 98.5774,
"eval_samples_per_second": 10.144,
"eval_steps_per_second": 0.639,
"step": 450
}
],
"logging_steps": 5,
"max_steps": 488,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.7034254328345395e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}