{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8561484918793503,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02320185614849188,
"grad_norm": 26.745355095184312,
"learning_rate": 5e-07,
"loss": 1.6569,
"step": 5
},
{
"epoch": 0.04640371229698376,
"grad_norm": 13.080188660738868,
"learning_rate": 1e-06,
"loss": 1.5099,
"step": 10
},
{
"epoch": 0.06960556844547564,
"grad_norm": 7.775505635686351,
"learning_rate": 9.998470286265414e-07,
"loss": 1.2782,
"step": 15
},
{
"epoch": 0.09280742459396751,
"grad_norm": 6.492859712313467,
"learning_rate": 9.993882081071305e-07,
"loss": 1.2359,
"step": 20
},
{
"epoch": 0.11600928074245939,
"grad_norm": 5.355551984540261,
"learning_rate": 9.986238191873872e-07,
"loss": 1.204,
"step": 25
},
{
"epoch": 0.13921113689095127,
"grad_norm": 5.125393165293572,
"learning_rate": 9.975543295858033e-07,
"loss": 1.1627,
"step": 30
},
{
"epoch": 0.16241299303944315,
"grad_norm": 4.710071299991241,
"learning_rate": 9.961803937075514e-07,
"loss": 1.1463,
"step": 35
},
{
"epoch": 0.18561484918793503,
"grad_norm": 5.029376799191572,
"learning_rate": 9.945028522440653e-07,
"loss": 1.1394,
"step": 40
},
{
"epoch": 0.2088167053364269,
"grad_norm": 4.763291242870039,
"learning_rate": 9.925227316586314e-07,
"loss": 1.1371,
"step": 45
},
{
"epoch": 0.23201856148491878,
"grad_norm": 4.903033982523367,
"learning_rate": 9.902412435583125e-07,
"loss": 1.1181,
"step": 50
},
{
"epoch": 0.23201856148491878,
"eval_loss": 1.1214605569839478,
"eval_runtime": 105.8329,
"eval_samples_per_second": 57.912,
"eval_steps_per_second": 0.907,
"step": 50
},
{
"epoch": 0.2552204176334107,
"grad_norm": 4.785575700738186,
"learning_rate": 9.876597839525813e-07,
"loss": 1.1163,
"step": 55
},
{
"epoch": 0.27842227378190254,
"grad_norm": 4.600727000401806,
"learning_rate": 9.847799323991233e-07,
"loss": 1.1232,
"step": 60
},
{
"epoch": 0.30162412993039445,
"grad_norm": 4.8200416840356315,
"learning_rate": 9.816034510373285e-07,
"loss": 1.125,
"step": 65
},
{
"epoch": 0.3248259860788863,
"grad_norm": 4.958997518359378,
"learning_rate": 9.781322835100637e-07,
"loss": 1.108,
"step": 70
},
{
"epoch": 0.3480278422273782,
"grad_norm": 4.883541365508776,
"learning_rate": 9.743685537743856e-07,
"loss": 1.106,
"step": 75
},
{
"epoch": 0.37122969837587005,
"grad_norm": 4.973507458353338,
"learning_rate": 9.70314564801922e-07,
"loss": 1.0973,
"step": 80
},
{
"epoch": 0.39443155452436196,
"grad_norm": 4.704415990191669,
"learning_rate": 9.659727971697173e-07,
"loss": 1.0964,
"step": 85
},
{
"epoch": 0.4176334106728538,
"grad_norm": 4.759885977268913,
"learning_rate": 9.613459075424033e-07,
"loss": 1.0956,
"step": 90
},
{
"epoch": 0.4408352668213457,
"grad_norm": 4.868535908803129,
"learning_rate": 9.564367270466245e-07,
"loss": 1.0787,
"step": 95
},
{
"epoch": 0.46403712296983757,
"grad_norm": 5.180286116736628,
"learning_rate": 9.51248259538713e-07,
"loss": 1.0765,
"step": 100
},
{
"epoch": 0.46403712296983757,
"eval_loss": 1.0775035619735718,
"eval_runtime": 105.5293,
"eval_samples_per_second": 58.079,
"eval_steps_per_second": 0.91,
"step": 100
},
{
"epoch": 0.4872389791183295,
"grad_norm": 5.290465762761348,
"learning_rate": 9.457836797666721e-07,
"loss": 1.0903,
"step": 105
},
{
"epoch": 0.5104408352668214,
"grad_norm": 4.81291157554945,
"learning_rate": 9.400463314275941e-07,
"loss": 1.0697,
"step": 110
},
{
"epoch": 0.5336426914153132,
"grad_norm": 4.914554202012043,
"learning_rate": 9.340397251217008e-07,
"loss": 1.0668,
"step": 115
},
{
"epoch": 0.5568445475638051,
"grad_norm": 5.240457841494325,
"learning_rate": 9.27767536204258e-07,
"loss": 1.0676,
"step": 120
},
{
"epoch": 0.580046403712297,
"grad_norm": 4.957459385263701,
"learning_rate": 9.212336025366787e-07,
"loss": 1.0746,
"step": 125
},
{
"epoch": 0.6032482598607889,
"grad_norm": 5.29032668711839,
"learning_rate": 9.144419221381918e-07,
"loss": 1.0724,
"step": 130
},
{
"epoch": 0.6264501160092807,
"grad_norm": 4.908560953587426,
"learning_rate": 9.073966507395121e-07,
"loss": 1.0745,
"step": 135
},
{
"epoch": 0.6496519721577726,
"grad_norm": 4.912842113728852,
"learning_rate": 9.001020992400085e-07,
"loss": 1.0559,
"step": 140
},
{
"epoch": 0.6728538283062645,
"grad_norm": 5.088585906783296,
"learning_rate": 8.925627310699274e-07,
"loss": 1.0705,
"step": 145
},
{
"epoch": 0.6960556844547564,
"grad_norm": 5.140684832177941,
"learning_rate": 8.84783159459285e-07,
"loss": 1.0639,
"step": 150
},
{
"epoch": 0.6960556844547564,
"eval_loss": 1.0501643419265747,
"eval_runtime": 105.4561,
"eval_samples_per_second": 58.119,
"eval_steps_per_second": 0.91,
"step": 150
},
{
"epoch": 0.7192575406032483,
"grad_norm": 5.311257433234373,
"learning_rate": 8.767681446150976e-07,
"loss": 1.0472,
"step": 155
},
{
"epoch": 0.7424593967517401,
"grad_norm": 5.091539509688025,
"learning_rate": 8.68522590808682e-07,
"loss": 1.0645,
"step": 160
},
{
"epoch": 0.765661252900232,
"grad_norm": 5.132013982763288,
"learning_rate": 8.600515433748001e-07,
"loss": 1.0416,
"step": 165
},
{
"epoch": 0.7888631090487239,
"grad_norm": 4.753354098230195,
"learning_rate": 8.51360185624495e-07,
"loss": 1.0478,
"step": 170
},
{
"epoch": 0.8120649651972158,
"grad_norm": 5.029473978539478,
"learning_rate": 8.424538356734956e-07,
"loss": 1.0383,
"step": 175
},
{
"epoch": 0.8352668213457076,
"grad_norm": 4.9588553004593345,
"learning_rate": 8.333379431881397e-07,
"loss": 1.0342,
"step": 180
},
{
"epoch": 0.8584686774941995,
"grad_norm": 5.234591483099779,
"learning_rate": 8.240180860508026e-07,
"loss": 1.0413,
"step": 185
},
{
"epoch": 0.8816705336426914,
"grad_norm": 5.121566469508508,
"learning_rate": 8.144999669468713e-07,
"loss": 1.0264,
"step": 190
},
{
"epoch": 0.9048723897911833,
"grad_norm": 5.0479045768726305,
"learning_rate": 8.047894098753539e-07,
"loss": 1.028,
"step": 195
},
{
"epoch": 0.9280742459396751,
"grad_norm": 5.0838098259091185,
"learning_rate": 7.948923565852597e-07,
"loss": 1.0308,
"step": 200
},
{
"epoch": 0.9280742459396751,
"eval_loss": 1.0281875133514404,
"eval_runtime": 105.8568,
"eval_samples_per_second": 57.899,
"eval_steps_per_second": 0.907,
"step": 200
},
{
"epoch": 0.951276102088167,
"grad_norm": 5.3244675969022826,
"learning_rate": 7.848148629399285e-07,
"loss": 1.0262,
"step": 205
},
{
"epoch": 0.974477958236659,
"grad_norm": 4.9307215762355305,
"learning_rate": 7.745630952115363e-07,
"loss": 1.0349,
"step": 210
},
{
"epoch": 0.9976798143851509,
"grad_norm": 4.994203203030838,
"learning_rate": 7.641433263080418e-07,
"loss": 1.0216,
"step": 215
},
{
"epoch": 1.0208816705336428,
"grad_norm": 5.112958880673586,
"learning_rate": 7.535619319348865e-07,
"loss": 0.9241,
"step": 220
},
{
"epoch": 1.0440835266821347,
"grad_norm": 5.264187445397404,
"learning_rate": 7.428253866937918e-07,
"loss": 0.9001,
"step": 225
},
{
"epoch": 1.0672853828306264,
"grad_norm": 5.645584402922182,
"learning_rate": 7.319402601210447e-07,
"loss": 0.8916,
"step": 230
},
{
"epoch": 1.0904872389791183,
"grad_norm": 5.655360994963379,
"learning_rate": 7.209132126676933e-07,
"loss": 0.8876,
"step": 235
},
{
"epoch": 1.1136890951276102,
"grad_norm": 5.3773890810778795,
"learning_rate": 7.097509916241145e-07,
"loss": 0.8931,
"step": 240
},
{
"epoch": 1.136890951276102,
"grad_norm": 5.658881203794,
"learning_rate": 6.984604269914436e-07,
"loss": 0.905,
"step": 245
},
{
"epoch": 1.160092807424594,
"grad_norm": 5.966282577193694,
"learning_rate": 6.870484273023967e-07,
"loss": 0.9038,
"step": 250
},
{
"epoch": 1.160092807424594,
"eval_loss": 1.0220295190811157,
"eval_runtime": 105.8362,
"eval_samples_per_second": 57.91,
"eval_steps_per_second": 0.907,
"step": 250
},
{
"epoch": 1.1832946635730859,
"grad_norm": 5.794176185315156,
"learning_rate": 6.755219753940388e-07,
"loss": 0.8964,
"step": 255
},
{
"epoch": 1.2064965197215778,
"grad_norm": 6.603391500331007,
"learning_rate": 6.638881241350883e-07,
"loss": 0.8898,
"step": 260
},
{
"epoch": 1.2296983758700697,
"grad_norm": 5.5914639272443205,
"learning_rate": 6.52153992110368e-07,
"loss": 0.8951,
"step": 265
},
{
"epoch": 1.2529002320185616,
"grad_norm": 5.339661007608592,
"learning_rate": 6.403267592650466e-07,
"loss": 0.8961,
"step": 270
},
{
"epoch": 1.2761020881670533,
"grad_norm": 5.448280965038798,
"learning_rate": 6.28413662511334e-07,
"loss": 0.8919,
"step": 275
},
{
"epoch": 1.2993039443155452,
"grad_norm": 5.476822697700394,
"learning_rate": 6.164219913003207e-07,
"loss": 0.8931,
"step": 280
},
{
"epoch": 1.322505800464037,
"grad_norm": 5.783548079343189,
"learning_rate": 6.043590831616676e-07,
"loss": 0.8792,
"step": 285
},
{
"epoch": 1.345707656612529,
"grad_norm": 5.59782698134665,
"learning_rate": 5.92232319213878e-07,
"loss": 0.8768,
"step": 290
},
{
"epoch": 1.368909512761021,
"grad_norm": 5.193853086769952,
"learning_rate": 5.800491196478988e-07,
"loss": 0.8788,
"step": 295
},
{
"epoch": 1.3921113689095128,
"grad_norm": 5.539347488257,
"learning_rate": 5.678169391868127e-07,
"loss": 0.8973,
"step": 300
},
{
"epoch": 1.3921113689095128,
"eval_loss": 1.0114275217056274,
"eval_runtime": 106.216,
"eval_samples_per_second": 57.703,
"eval_steps_per_second": 0.904,
"step": 300
},
{
"epoch": 1.4153132250580047,
"grad_norm": 5.567338787725618,
"learning_rate": 5.555432625244023e-07,
"loss": 0.8831,
"step": 305
},
{
"epoch": 1.4385150812064964,
"grad_norm": 5.412598997121907,
"learning_rate": 5.432355997453728e-07,
"loss": 0.8848,
"step": 310
},
{
"epoch": 1.4617169373549883,
"grad_norm": 5.4356510240439775,
"learning_rate": 5.309014817300421e-07,
"loss": 0.8999,
"step": 315
},
{
"epoch": 1.4849187935034802,
"grad_norm": 5.525607667062919,
"learning_rate": 5.185484555463026e-07,
"loss": 0.8901,
"step": 320
},
{
"epoch": 1.5081206496519721,
"grad_norm": 5.583006624663847,
"learning_rate": 5.061840798316814e-07,
"loss": 0.8909,
"step": 325
},
{
"epoch": 1.531322505800464,
"grad_norm": 5.822776934487761,
"learning_rate": 4.938159201683186e-07,
"loss": 0.8829,
"step": 330
},
{
"epoch": 1.554524361948956,
"grad_norm": 5.427885443572571,
"learning_rate": 4.814515444536974e-07,
"loss": 0.8867,
"step": 335
},
{
"epoch": 1.5777262180974478,
"grad_norm": 5.513594905050496,
"learning_rate": 4.69098518269958e-07,
"loss": 0.892,
"step": 340
},
{
"epoch": 1.6009280742459397,
"grad_norm": 5.785273130658459,
"learning_rate": 4.5676440025462726e-07,
"loss": 0.8775,
"step": 345
},
{
"epoch": 1.6241299303944317,
"grad_norm": 5.494906178164733,
"learning_rate": 4.444567374755977e-07,
"loss": 0.8747,
"step": 350
},
{
"epoch": 1.6241299303944317,
"eval_loss": 1.0039345026016235,
"eval_runtime": 105.8025,
"eval_samples_per_second": 57.929,
"eval_steps_per_second": 0.907,
"step": 350
},
{
"epoch": 1.6473317865429236,
"grad_norm": 5.49251321009188,
"learning_rate": 4.3218306081318713e-07,
"loss": 0.884,
"step": 355
},
{
"epoch": 1.6705336426914155,
"grad_norm": 5.618196571147986,
"learning_rate": 4.199508803521012e-07,
"loss": 0.8945,
"step": 360
},
{
"epoch": 1.6937354988399071,
"grad_norm": 5.908619967180135,
"learning_rate": 4.0776768078612207e-07,
"loss": 0.8793,
"step": 365
},
{
"epoch": 1.716937354988399,
"grad_norm": 5.822870470090775,
"learning_rate": 3.9564091683833244e-07,
"loss": 0.8785,
"step": 370
},
{
"epoch": 1.740139211136891,
"grad_norm": 5.685661727934108,
"learning_rate": 3.835780086996793e-07,
"loss": 0.8772,
"step": 375
},
{
"epoch": 1.7633410672853829,
"grad_norm": 5.692617224399981,
"learning_rate": 3.7158633748866607e-07,
"loss": 0.8701,
"step": 380
},
{
"epoch": 1.7865429234338746,
"grad_norm": 5.6393286368292355,
"learning_rate": 3.596732407349536e-07,
"loss": 0.871,
"step": 385
},
{
"epoch": 1.8097447795823665,
"grad_norm": 5.672110777773612,
"learning_rate": 3.4784600788963193e-07,
"loss": 0.8751,
"step": 390
},
{
"epoch": 1.8329466357308584,
"grad_norm": 5.835389029793195,
"learning_rate": 3.3611187586491157e-07,
"loss": 0.8687,
"step": 395
},
{
"epoch": 1.8561484918793503,
"grad_norm": 5.386053225084359,
"learning_rate": 3.244780246059612e-07,
"loss": 0.8818,
"step": 400
},
{
"epoch": 1.8561484918793503,
"eval_loss": 0.996471107006073,
"eval_runtime": 105.8008,
"eval_samples_per_second": 57.93,
"eval_steps_per_second": 0.907,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 645,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2358347349098496.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}