byt5_small_random / trainer_state.json
{
"best_metric": 1.6808745861053467,
"best_model_checkpoint": "model_training/byt5_small/checkpoints-random-09-07-09-31/checkpoint-475",
"epoch": 7.715736040609137,
"eval_steps": 25,
"global_step": 475,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08121827411167512,
"grad_norm": 1909.4637451171875,
"learning_rate": 5.102040816326531e-05,
"loss": 57.0818,
"step": 5
},
{
"epoch": 0.16243654822335024,
"grad_norm": 99.90604400634766,
"learning_rate": 0.00010204081632653062,
"loss": 16.1261,
"step": 10
},
{
"epoch": 0.2436548223350254,
"grad_norm": 3.8745250701904297,
"learning_rate": 0.00015306122448979594,
"loss": 4.167,
"step": 15
},
{
"epoch": 0.3248730964467005,
"grad_norm": 2.7964189052581787,
"learning_rate": 0.00020408163265306123,
"loss": 3.5792,
"step": 20
},
{
"epoch": 0.40609137055837563,
"grad_norm": 2.3573312759399414,
"learning_rate": 0.00025510204081632655,
"loss": 2.6289,
"step": 25
},
{
"epoch": 0.40609137055837563,
"eval_loss": 1.9594327211380005,
"eval_runtime": 89.1162,
"eval_samples_per_second": 11.221,
"eval_steps_per_second": 1.403,
"step": 25
},
{
"epoch": 0.4873096446700508,
"grad_norm": 4.407505512237549,
"learning_rate": 0.0003061224489795919,
"loss": 2.181,
"step": 30
},
{
"epoch": 0.5685279187817259,
"grad_norm": 2.605747699737549,
"learning_rate": 0.00035714285714285714,
"loss": 2.1089,
"step": 35
},
{
"epoch": 0.649746192893401,
"grad_norm": 6.306596755981445,
"learning_rate": 0.00040816326530612246,
"loss": 2.0922,
"step": 40
},
{
"epoch": 0.7309644670050761,
"grad_norm": 19.09569549560547,
"learning_rate": 0.0004591836734693878,
"loss": 2.0705,
"step": 45
},
{
"epoch": 0.8121827411167513,
"grad_norm": 1.264049768447876,
"learning_rate": 0.0004999935985425297,
"loss": 2.0794,
"step": 50
},
{
"epoch": 0.8121827411167513,
"eval_loss": 1.8679453134536743,
"eval_runtime": 89.0729,
"eval_samples_per_second": 11.227,
"eval_steps_per_second": 1.403,
"step": 50
},
{
"epoch": 0.8934010152284264,
"grad_norm": 1.4896085262298584,
"learning_rate": 0.0004997695819512612,
"loss": 2.0143,
"step": 55
},
{
"epoch": 0.9746192893401016,
"grad_norm": 4.419875144958496,
"learning_rate": 0.0004992258202402822,
"loss": 2.0938,
"step": 60
},
{
"epoch": 1.0558375634517767,
"grad_norm": 5.774366855621338,
"learning_rate": 0.0004983630095117843,
"loss": 2.0798,
"step": 65
},
{
"epoch": 1.1370558375634519,
"grad_norm": 1.4512771368026733,
"learning_rate": 0.0004971822543018662,
"loss": 2.0285,
"step": 70
},
{
"epoch": 1.218274111675127,
"grad_norm": 3.769318103790283,
"learning_rate": 0.0004956850661665511,
"loss": 2.1148,
"step": 75
},
{
"epoch": 1.218274111675127,
"eval_loss": 2.073261260986328,
"eval_runtime": 89.1696,
"eval_samples_per_second": 11.215,
"eval_steps_per_second": 1.402,
"step": 75
},
{
"epoch": 1.299492385786802,
"grad_norm": 5.518929481506348,
"learning_rate": 0.0004938733617467517,
"loss": 2.2383,
"step": 80
},
{
"epoch": 1.380710659898477,
"grad_norm": 3.9714841842651367,
"learning_rate": 0.0004917494603146632,
"loss": 2.168,
"step": 85
},
{
"epoch": 1.4619289340101522,
"grad_norm": 6.865543365478516,
"learning_rate": 0.0004893160808047222,
"loss": 2.1865,
"step": 90
},
{
"epoch": 1.5431472081218274,
"grad_norm": 1.2264541387557983,
"learning_rate": 0.00048657633833293557,
"loss": 2.1268,
"step": 95
},
{
"epoch": 1.6243654822335025,
"grad_norm": 1.3554304838180542,
"learning_rate": 0.0004835337402090316,
"loss": 1.9755,
"step": 100
},
{
"epoch": 1.6243654822335025,
"eval_loss": 1.8600231409072876,
"eval_runtime": 89.191,
"eval_samples_per_second": 11.212,
"eval_steps_per_second": 1.401,
"step": 100
},
{
"epoch": 1.7055837563451777,
"grad_norm": 0.7942481637001038,
"learning_rate": 0.0004801921814465414,
"loss": 1.9525,
"step": 105
},
{
"epoch": 1.7868020304568528,
"grad_norm": 0.5718096494674683,
"learning_rate": 0.00047655593977655674,
"loss": 1.9006,
"step": 110
},
{
"epoch": 1.868020304568528,
"grad_norm": 1.3327739238739014,
"learning_rate": 0.0004726296701715489,
"loss": 1.8652,
"step": 115
},
{
"epoch": 1.9492385786802031,
"grad_norm": 1.577561616897583,
"learning_rate": 0.00046841839888625623,
"loss": 1.8377,
"step": 120
},
{
"epoch": 2.030456852791878,
"grad_norm": 0.5204086899757385,
"learning_rate": 0.0004639275170232734,
"loss": 1.8577,
"step": 125
},
{
"epoch": 2.030456852791878,
"eval_loss": 1.7442234754562378,
"eval_runtime": 89.0663,
"eval_samples_per_second": 11.228,
"eval_steps_per_second": 1.403,
"step": 125
},
{
"epoch": 2.1116751269035534,
"grad_norm": 2.453739643096924,
"learning_rate": 0.0004591627736315743,
"loss": 1.8342,
"step": 130
},
{
"epoch": 2.1928934010152283,
"grad_norm": 0.7246991395950317,
"learning_rate": 0.0004541302683468084,
"loss": 1.7917,
"step": 135
},
{
"epoch": 2.2741116751269037,
"grad_norm": 0.6339123845100403,
"learning_rate": 0.0004488364435827881,
"loss": 1.82,
"step": 140
},
{
"epoch": 2.3553299492385786,
"grad_norm": 0.36034825444221497,
"learning_rate": 0.00044328807628416644,
"loss": 1.8474,
"step": 145
},
{
"epoch": 2.436548223350254,
"grad_norm": 0.30923303961753845,
"learning_rate": 0.0004374922692508611,
"loss": 1.8128,
"step": 150
},
{
"epoch": 2.436548223350254,
"eval_loss": 1.7244406938552856,
"eval_runtime": 89.0785,
"eval_samples_per_second": 11.226,
"eval_steps_per_second": 1.403,
"step": 150
},
{
"epoch": 2.517766497461929,
"grad_norm": 0.3758034110069275,
"learning_rate": 0.0004314564420453311,
"loss": 1.8011,
"step": 155
},
{
"epoch": 2.598984771573604,
"grad_norm": 0.9316683411598206,
"learning_rate": 0.0004251883214943475,
"loss": 1.7757,
"step": 160
},
{
"epoch": 2.6802030456852792,
"grad_norm": 0.5500881671905518,
"learning_rate": 0.0004186959317974155,
"loss": 1.7824,
"step": 165
},
{
"epoch": 2.761421319796954,
"grad_norm": 0.3643234372138977,
"learning_rate": 0.00041198758425451266,
"loss": 1.7431,
"step": 170
},
{
"epoch": 2.8426395939086295,
"grad_norm": 0.5587167739868164,
"learning_rate": 0.00040507186662629185,
"loss": 1.7935,
"step": 175
},
{
"epoch": 2.8426395939086295,
"eval_loss": 1.7120025157928467,
"eval_runtime": 89.075,
"eval_samples_per_second": 11.226,
"eval_steps_per_second": 1.403,
"step": 175
},
{
"epoch": 2.9238578680203045,
"grad_norm": 0.44406819343566895,
"learning_rate": 0.0003979576321403705,
"loss": 1.7312,
"step": 180
},
{
"epoch": 3.00507614213198,
"grad_norm": 0.7630233764648438,
"learning_rate": 0.0003906539881577793,
"loss": 1.8026,
"step": 185
},
{
"epoch": 3.0862944162436547,
"grad_norm": 0.41732481122016907,
"learning_rate": 0.0003831702845140801,
"loss": 1.7905,
"step": 190
},
{
"epoch": 3.16751269035533,
"grad_norm": 0.522934079170227,
"learning_rate": 0.00037551610155007613,
"loss": 1.7802,
"step": 195
},
{
"epoch": 3.248730964467005,
"grad_norm": 0.5810943841934204,
"learning_rate": 0.00036770123784744027,
"loss": 1.7463,
"step": 200
},
{
"epoch": 3.248730964467005,
"eval_loss": 1.7069350481033325,
"eval_runtime": 89.0777,
"eval_samples_per_second": 11.226,
"eval_steps_per_second": 1.403,
"step": 200
},
{
"epoch": 3.3299492385786804,
"grad_norm": 0.1446053683757782,
"learning_rate": 0.00035973569768495855,
"loss": 1.7338,
"step": 205
},
{
"epoch": 3.4111675126903553,
"grad_norm": 0.5621339678764343,
"learning_rate": 0.0003516296782314491,
"loss": 1.7331,
"step": 210
},
{
"epoch": 3.4923857868020303,
"grad_norm": 0.25368839502334595,
"learning_rate": 0.00034339355649175095,
"loss": 1.8015,
"step": 215
},
{
"epoch": 3.5736040609137056,
"grad_norm": 0.12756070494651794,
"learning_rate": 0.00033503787602249364,
"loss": 1.7295,
"step": 220
},
{
"epoch": 3.6548223350253806,
"grad_norm": 0.265066921710968,
"learning_rate": 0.00032657333343465356,
"loss": 1.7455,
"step": 225
},
{
"epoch": 3.6548223350253806,
"eval_loss": 1.726246953010559,
"eval_runtime": 89.0736,
"eval_samples_per_second": 11.227,
"eval_steps_per_second": 1.403,
"step": 225
},
{
"epoch": 3.736040609137056,
"grad_norm": 1.8155518770217896,
"learning_rate": 0.0003180107647001769,
"loss": 1.7157,
"step": 230
},
{
"epoch": 3.817258883248731,
"grad_norm": 0.34362295269966125,
"learning_rate": 0.0003093611312801979,
"loss": 1.7732,
"step": 235
},
{
"epoch": 3.8984771573604062,
"grad_norm": 0.4821447432041168,
"learning_rate": 0.00030063550609261025,
"loss": 1.7483,
"step": 240
},
{
"epoch": 3.979695431472081,
"grad_norm": 0.359451562166214,
"learning_rate": 0.000291845059336957,
"loss": 1.7725,
"step": 245
},
{
"epoch": 4.060913705583756,
"grad_norm": 27.946842193603516,
"learning_rate": 0.0002830010441947834,
"loss": 2.2729,
"step": 250
},
{
"epoch": 4.060913705583756,
"eval_loss": 9.357398986816406,
"eval_runtime": 89.1669,
"eval_samples_per_second": 11.215,
"eval_steps_per_second": 1.402,
"step": 250
},
{
"epoch": 4.1421319796954315,
"grad_norm": 0.8487386107444763,
"learning_rate": 0.00027411478242376017,
"loss": 3.5769,
"step": 255
},
{
"epoch": 4.223350253807107,
"grad_norm": 2.305914878845215,
"learning_rate": 0.00026519764986401774,
"loss": 1.824,
"step": 260
},
{
"epoch": 4.304568527918782,
"grad_norm": 0.5802918076515198,
"learning_rate": 0.000256261061875247,
"loss": 1.7698,
"step": 265
},
{
"epoch": 4.385786802030457,
"grad_norm": 0.5246661901473999,
"learning_rate": 0.0002473164587232079,
"loss": 1.8066,
"step": 270
},
{
"epoch": 4.467005076142132,
"grad_norm": 0.15261879563331604,
"learning_rate": 0.0002383752909343547,
"loss": 1.7924,
"step": 275
},
{
"epoch": 4.467005076142132,
"eval_loss": 1.708724021911621,
"eval_runtime": 89.1842,
"eval_samples_per_second": 11.213,
"eval_steps_per_second": 1.402,
"step": 275
},
{
"epoch": 4.548223350253807,
"grad_norm": 0.34460264444351196,
"learning_rate": 0.0002294490046373259,
"loss": 1.7864,
"step": 280
},
{
"epoch": 4.629441624365482,
"grad_norm": 0.2242611199617386,
"learning_rate": 0.00022054902691006405,
"loss": 1.788,
"step": 285
},
{
"epoch": 4.710659898477157,
"grad_norm": 0.19834347069263458,
"learning_rate": 0.00021168675115132315,
"loss": 1.7513,
"step": 290
},
{
"epoch": 4.791878172588833,
"grad_norm": 0.2671795189380646,
"learning_rate": 0.00020287352249529153,
"loss": 1.786,
"step": 295
},
{
"epoch": 4.873096446700508,
"grad_norm": 0.10885637998580933,
"learning_rate": 0.00019412062328800044,
"loss": 1.7419,
"step": 300
},
{
"epoch": 4.873096446700508,
"eval_loss": 1.6944421529769897,
"eval_runtime": 89.2057,
"eval_samples_per_second": 11.21,
"eval_steps_per_second": 1.401,
"step": 300
},
{
"epoch": 4.9543147208121825,
"grad_norm": 0.14026451110839844,
"learning_rate": 0.000185439258644112,
"loss": 1.7621,
"step": 305
},
{
"epoch": 5.035532994923858,
"grad_norm": 0.24394521117210388,
"learning_rate": 0.00017684054210257517,
"loss": 1.7105,
"step": 310
},
{
"epoch": 5.116751269035533,
"grad_norm": 0.17971426248550415,
"learning_rate": 0.00016833548139951395,
"loss": 1.7346,
"step": 315
},
{
"epoch": 5.197969543147208,
"grad_norm": 0.17999742925167084,
"learning_rate": 0.0001599349643765599,
"loss": 1.7767,
"step": 320
},
{
"epoch": 5.279187817258883,
"grad_norm": 0.10608798265457153,
"learning_rate": 0.0001516497450426686,
"loss": 1.8044,
"step": 325
},
{
"epoch": 5.279187817258883,
"eval_loss": 1.6952660083770752,
"eval_runtime": 89.0986,
"eval_samples_per_second": 11.224,
"eval_steps_per_second": 1.403,
"step": 325
},
{
"epoch": 5.3604060913705585,
"grad_norm": 0.09113989025354385,
"learning_rate": 0.00014349042980726362,
"loss": 1.7125,
"step": 330
},
{
"epoch": 5.441624365482234,
"grad_norm": 0.11321850121021271,
"learning_rate": 0.0001354674639023318,
"loss": 1.7597,
"step": 335
},
{
"epoch": 5.522842639593908,
"grad_norm": 0.1310632973909378,
"learning_rate": 0.00012759111801085066,
"loss": 1.7256,
"step": 340
},
{
"epoch": 5.604060913705584,
"grad_norm": 0.10128358006477356,
"learning_rate": 0.00011987147511866788,
"loss": 1.7871,
"step": 345
},
{
"epoch": 5.685279187817259,
"grad_norm": 0.3035777807235718,
"learning_rate": 0.00011231841760666186,
"loss": 1.7318,
"step": 350
},
{
"epoch": 5.685279187817259,
"eval_loss": 1.688687801361084,
"eval_runtime": 89.0633,
"eval_samples_per_second": 11.228,
"eval_steps_per_second": 1.403,
"step": 350
},
{
"epoch": 5.7664974619289335,
"grad_norm": 0.12795297801494598,
"learning_rate": 0.0001049416145997094,
"loss": 1.7698,
"step": 355
},
{
"epoch": 5.847715736040609,
"grad_norm": 0.15086208283901215,
"learning_rate": 9.775050958865584e-05,
"loss": 1.7826,
"step": 360
},
{
"epoch": 5.928934010152284,
"grad_norm": 0.14251065254211426,
"learning_rate": 9.075430834113152e-05,
"loss": 1.6859,
"step": 365
},
{
"epoch": 6.01015228426396,
"grad_norm": 0.0902705043554306,
"learning_rate": 8.396196711669335e-05,
"loss": 1.7259,
"step": 370
},
{
"epoch": 6.091370558375634,
"grad_norm": 0.23637209832668304,
"learning_rate": 7.738218120137671e-05,
"loss": 1.7052,
"step": 375
},
{
"epoch": 6.091370558375634,
"eval_loss": 1.6875873804092407,
"eval_runtime": 89.1637,
"eval_samples_per_second": 11.215,
"eval_steps_per_second": 1.402,
"step": 375
},
{
"epoch": 6.1725888324873095,
"grad_norm": 0.07396359741687775,
"learning_rate": 7.102337377633394e-05,
"loss": 1.7372,
"step": 380
},
{
"epoch": 6.253807106598985,
"grad_norm": 0.21548880636692047,
"learning_rate": 6.489368513481228e-05,
"loss": 1.7448,
"step": 385
},
{
"epoch": 6.33502538071066,
"grad_norm": 0.19514048099517822,
"learning_rate": 5.9000962261273136e-05,
"loss": 1.7359,
"step": 390
},
{
"epoch": 6.416243654822335,
"grad_norm": 0.16353292763233185,
"learning_rate": 5.3352748785993164e-05,
"loss": 1.7747,
"step": 395
},
{
"epoch": 6.49746192893401,
"grad_norm": 0.19792306423187256,
"learning_rate": 4.795627532800806e-05,
"loss": 1.7363,
"step": 400
},
{
"epoch": 6.49746192893401,
"eval_loss": 1.6819108724594116,
"eval_runtime": 89.1529,
"eval_samples_per_second": 11.217,
"eval_steps_per_second": 1.402,
"step": 400
},
{
"epoch": 6.5786802030456855,
"grad_norm": 0.08929474651813507,
"learning_rate": 4.281845023876074e-05,
"loss": 1.6744,
"step": 405
},
{
"epoch": 6.659898477157361,
"grad_norm": 0.09701024740934372,
"learning_rate": 3.794585075830329e-05,
"loss": 1.7144,
"step": 410
},
{
"epoch": 6.741116751269035,
"grad_norm": 0.06863227486610413,
"learning_rate": 3.334471459537497e-05,
"loss": 1.7863,
"step": 415
},
{
"epoch": 6.822335025380711,
"grad_norm": 0.07492458820343018,
"learning_rate": 2.902093194213526e-05,
"loss": 1.7385,
"step": 420
},
{
"epoch": 6.903553299492386,
"grad_norm": 0.09851057827472687,
"learning_rate": 2.4980037933772488e-05,
"loss": 1.7655,
"step": 425
},
{
"epoch": 6.903553299492386,
"eval_loss": 1.6825205087661743,
"eval_runtime": 89.1368,
"eval_samples_per_second": 11.219,
"eval_steps_per_second": 1.402,
"step": 425
},
{
"epoch": 6.9847715736040605,
"grad_norm": 0.07320298254489899,
"learning_rate": 2.122720556264357e-05,
"loss": 1.7219,
"step": 430
},
{
"epoch": 7.065989847715736,
"grad_norm": 0.07251249998807907,
"learning_rate": 1.776723905601438e-05,
"loss": 1.7049,
"step": 435
},
{
"epoch": 7.147208121827411,
"grad_norm": 0.07650978118181229,
"learning_rate": 1.4604567725877926e-05,
"loss": 1.7445,
"step": 440
},
{
"epoch": 7.228426395939087,
"grad_norm": 0.0650210827589035,
"learning_rate": 1.1743240298725116e-05,
"loss": 1.7892,
"step": 445
},
{
"epoch": 7.309644670050761,
"grad_norm": 0.06279773265123367,
"learning_rate": 9.18691973252539e-06,
"loss": 1.747,
"step": 450
},
{
"epoch": 7.309644670050761,
"eval_loss": 1.6831755638122559,
"eval_runtime": 89.0915,
"eval_samples_per_second": 11.224,
"eval_steps_per_second": 1.403,
"step": 450
},
{
"epoch": 7.3908629441624365,
"grad_norm": 0.0952860563993454,
"learning_rate": 6.938878527553066e-06,
"loss": 1.7054,
"step": 455
},
{
"epoch": 7.472081218274112,
"grad_norm": 0.07091079652309418,
"learning_rate": 5.001994537062265e-06,
"loss": 1.7471,
"step": 460
},
{
"epoch": 7.553299492385786,
"grad_norm": 0.07213548570871353,
"learning_rate": 3.3787472831732225e-06,
"loss": 1.7511,
"step": 465
},
{
"epoch": 7.634517766497462,
"grad_norm": 0.06398265808820724,
"learning_rate": 2.071214782686265e-06,
"loss": 1.6653,
"step": 470
},
{
"epoch": 7.715736040609137,
"grad_norm": 0.06733715534210205,
"learning_rate": 1.0810708868871643e-06,
"loss": 1.7363,
"step": 475
},
{
"epoch": 7.715736040609137,
"eval_loss": 1.6808745861053467,
"eval_runtime": 89.164,
"eval_samples_per_second": 11.215,
"eval_steps_per_second": 1.402,
"step": 475
}
],
"logging_steps": 5,
"max_steps": 488,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.663017499145011e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
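
For reference, a minimal sketch of how one might plot the training and evaluation loss curves recorded in "log_history" above. This assumes the file has been saved locally as trainer_state.json and that matplotlib is installed; both the path and the plot title are illustrative, not part of the original checkpoint.

import json
import matplotlib.pyplot as plt

# Load the Trainer state (local filename is an assumption for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.title("byt5_small_random: loss vs. step")  # illustrative title
plt.show()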