{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.048856453632171976,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00012214113408042993,
"eval_loss": 11.5,
"eval_runtime": 22.3392,
"eval_samples_per_second": 154.348,
"eval_steps_per_second": 77.174,
"step": 1
},
{
"epoch": 0.0006107056704021497,
"grad_norm": 5.640897143166512e-05,
"learning_rate": 1.6666666666666667e-05,
"loss": 46.0,
"step": 5
},
{
"epoch": 0.0012214113408042994,
"grad_norm": 7.911106513347477e-05,
"learning_rate": 3.3333333333333335e-05,
"loss": 46.0,
"step": 10
},
{
"epoch": 0.001832117011206449,
"grad_norm": 7.98892870079726e-05,
"learning_rate": 5e-05,
"loss": 46.0,
"step": 15
},
{
"epoch": 0.002442822681608599,
"grad_norm": 9.459878492634743e-05,
"learning_rate": 6.666666666666667e-05,
"loss": 46.0,
"step": 20
},
{
"epoch": 0.0030535283520107485,
"grad_norm": 7.22497861715965e-05,
"learning_rate": 8.333333333333334e-05,
"loss": 46.0,
"step": 25
},
{
"epoch": 0.003664234022412898,
"grad_norm": 7.780244777677581e-05,
"learning_rate": 0.0001,
"loss": 46.0,
"step": 30
},
{
"epoch": 0.004274939692815048,
"grad_norm": 8.012832404347137e-05,
"learning_rate": 9.995494831023409e-05,
"loss": 46.0,
"step": 35
},
{
"epoch": 0.004885645363217198,
"grad_norm": 8.905514550860971e-05,
"learning_rate": 9.981987442712633e-05,
"loss": 46.0,
"step": 40
},
{
"epoch": 0.005496351033619347,
"grad_norm": 8.750787674216554e-05,
"learning_rate": 9.959502176294383e-05,
"loss": 46.0,
"step": 45
},
{
"epoch": 0.006107056704021497,
"grad_norm": 9.322659025201574e-05,
"learning_rate": 9.928079551738543e-05,
"loss": 46.0,
"step": 50
},
{
"epoch": 0.006717762374423646,
"grad_norm": 7.013502181507647e-05,
"learning_rate": 9.887776194738432e-05,
"loss": 46.0,
"step": 55
},
{
"epoch": 0.007328468044825796,
"grad_norm": 7.971763261593878e-05,
"learning_rate": 9.838664734667495e-05,
"loss": 46.0,
"step": 60
},
{
"epoch": 0.007939173715227945,
"grad_norm": 8.508894825354218e-05,
"learning_rate": 9.780833673696254e-05,
"loss": 46.0,
"step": 65
},
{
"epoch": 0.008549879385630095,
"grad_norm": 9.751072502695024e-05,
"learning_rate": 9.714387227305422e-05,
"loss": 46.0,
"step": 70
},
{
"epoch": 0.009160585056032245,
"grad_norm": 0.00010631086479406804,
"learning_rate": 9.639445136482548e-05,
"loss": 46.0,
"step": 75
},
{
"epoch": 0.009771290726434395,
"grad_norm": 0.00012184488878119737,
"learning_rate": 9.55614245194068e-05,
"loss": 46.0,
"step": 80
},
{
"epoch": 0.010381996396836544,
"grad_norm": 9.975822467822582e-05,
"learning_rate": 9.464629290747842e-05,
"loss": 46.0,
"step": 85
},
{
"epoch": 0.010992702067238694,
"grad_norm": 0.00011803951201727614,
"learning_rate": 9.365070565805941e-05,
"loss": 46.0,
"step": 90
},
{
"epoch": 0.011603407737640844,
"grad_norm": 0.00013076045433990657,
"learning_rate": 9.257645688666556e-05,
"loss": 46.0,
"step": 95
},
{
"epoch": 0.012214113408042994,
"grad_norm": 0.00011885855928994715,
"learning_rate": 9.142548246219212e-05,
"loss": 46.0,
"step": 100
},
{
"epoch": 0.012214113408042994,
"eval_loss": 11.5,
"eval_runtime": 22.3431,
"eval_samples_per_second": 154.32,
"eval_steps_per_second": 77.16,
"step": 100
},
{
"epoch": 0.012824819078445144,
"grad_norm": 8.25794049887918e-05,
"learning_rate": 9.019985651834703e-05,
"loss": 46.0,
"step": 105
},
{
"epoch": 0.013435524748847292,
"grad_norm": 0.0001379641325911507,
"learning_rate": 8.890178771592199e-05,
"loss": 46.0,
"step": 110
},
{
"epoch": 0.014046230419249442,
"grad_norm": 0.00014777411706745625,
"learning_rate": 8.753361526263621e-05,
"loss": 46.0,
"step": 115
},
{
"epoch": 0.014656936089651592,
"grad_norm": 0.00013291859067976475,
"learning_rate": 8.609780469772623e-05,
"loss": 46.0,
"step": 120
},
{
"epoch": 0.015267641760053742,
"grad_norm": 0.0001347983634332195,
"learning_rate": 8.459694344887732e-05,
"loss": 46.0,
"step": 125
},
{
"epoch": 0.01587834743045589,
"grad_norm": 0.000160489886184223,
"learning_rate": 8.303373616950408e-05,
"loss": 46.0,
"step": 130
},
{
"epoch": 0.01648905310085804,
"grad_norm": 0.00014025198470335454,
"learning_rate": 8.141099986478212e-05,
"loss": 46.0,
"step": 135
},
{
"epoch": 0.01709975877126019,
"grad_norm": 0.0001880452618934214,
"learning_rate": 7.973165881521434e-05,
"loss": 46.0,
"step": 140
},
{
"epoch": 0.01771046444166234,
"grad_norm": 0.00018401446868665516,
"learning_rate": 7.799873930687978e-05,
"loss": 46.0,
"step": 145
},
{
"epoch": 0.01832117011206449,
"grad_norm": 0.00023051275638863444,
"learning_rate": 7.621536417786159e-05,
"loss": 46.0,
"step": 150
},
{
"epoch": 0.01893187578246664,
"grad_norm": 0.0001433873549103737,
"learning_rate": 7.438474719068173e-05,
"loss": 46.0,
"step": 155
},
{
"epoch": 0.01954258145286879,
"grad_norm": 0.00019341360894031823,
"learning_rate": 7.251018724088367e-05,
"loss": 46.0,
"step": 160
},
{
"epoch": 0.02015328712327094,
"grad_norm": 0.00021299674699548632,
"learning_rate": 7.059506241219965e-05,
"loss": 46.0,
"step": 165
},
{
"epoch": 0.020763992793673088,
"grad_norm": 0.00024367104924749583,
"learning_rate": 6.864282388901544e-05,
"loss": 46.0,
"step": 170
},
{
"epoch": 0.021374698464075238,
"grad_norm": 0.00023080628307070583,
"learning_rate": 6.665698973710288e-05,
"loss": 46.0,
"step": 175
},
{
"epoch": 0.021985404134477388,
"grad_norm": 0.00023835583124309778,
"learning_rate": 6.464113856382752e-05,
"loss": 46.0,
"step": 180
},
{
"epoch": 0.022596109804879538,
"grad_norm": 0.00025805115001276135,
"learning_rate": 6.259890306925627e-05,
"loss": 46.0,
"step": 185
},
{
"epoch": 0.023206815475281688,
"grad_norm": 0.0002209192607551813,
"learning_rate": 6.0533963499786314e-05,
"loss": 46.0,
"step": 190
},
{
"epoch": 0.023817521145683838,
"grad_norm": 0.00032963225385174155,
"learning_rate": 5.8450041016092464e-05,
"loss": 46.0,
"step": 195
},
{
"epoch": 0.024428226816085988,
"grad_norm": 0.0002963414299301803,
"learning_rate": 5.6350890987343944e-05,
"loss": 46.0,
"step": 200
},
{
"epoch": 0.024428226816085988,
"eval_loss": 11.5,
"eval_runtime": 22.3211,
"eval_samples_per_second": 154.472,
"eval_steps_per_second": 77.236,
"step": 200
},
{
"epoch": 0.025038932486488138,
"grad_norm": 0.00021887882030569017,
"learning_rate": 5.4240296223775465e-05,
"loss": 46.0,
"step": 205
},
{
"epoch": 0.025649638156890288,
"grad_norm": 0.00023999107361305505,
"learning_rate": 5.212206015980742e-05,
"loss": 46.0,
"step": 210
},
{
"epoch": 0.026260343827292438,
"grad_norm": 0.00023386770044453442,
"learning_rate": 5e-05,
"loss": 46.0,
"step": 215
},
{
"epoch": 0.026871049497694584,
"grad_norm": 0.0002699745527934283,
"learning_rate": 4.78779398401926e-05,
"loss": 46.0,
"step": 220
},
{
"epoch": 0.027481755168096735,
"grad_norm": 0.00032450436265207827,
"learning_rate": 4.575970377622456e-05,
"loss": 46.0,
"step": 225
},
{
"epoch": 0.028092460838498885,
"grad_norm": 0.0002890110481530428,
"learning_rate": 4.364910901265606e-05,
"loss": 46.0,
"step": 230
},
{
"epoch": 0.028703166508901035,
"grad_norm": 0.0002972777874674648,
"learning_rate": 4.1549958983907555e-05,
"loss": 46.0,
"step": 235
},
{
"epoch": 0.029313872179303185,
"grad_norm": 0.0003348960308358073,
"learning_rate": 3.94660365002137e-05,
"loss": 46.0,
"step": 240
},
{
"epoch": 0.029924577849705335,
"grad_norm": 0.0003595406888052821,
"learning_rate": 3.740109693074375e-05,
"loss": 46.0,
"step": 245
},
{
"epoch": 0.030535283520107485,
"grad_norm": 0.00034705991856753826,
"learning_rate": 3.5358861436172485e-05,
"loss": 46.0,
"step": 250
},
{
"epoch": 0.031145989190509635,
"grad_norm": 0.00019824641640298069,
"learning_rate": 3.334301026289712e-05,
"loss": 46.0,
"step": 255
},
{
"epoch": 0.03175669486091178,
"grad_norm": 0.0003378826950211078,
"learning_rate": 3.135717611098458e-05,
"loss": 46.0,
"step": 260
},
{
"epoch": 0.032367400531313935,
"grad_norm": 0.00028211367316544056,
"learning_rate": 2.9404937587800375e-05,
"loss": 46.0,
"step": 265
},
{
"epoch": 0.03297810620171608,
"grad_norm": 0.00028978593763895333,
"learning_rate": 2.748981275911633e-05,
"loss": 46.0,
"step": 270
},
{
"epoch": 0.033588811872118235,
"grad_norm": 0.00031383702298626304,
"learning_rate": 2.5615252809318284e-05,
"loss": 46.0,
"step": 275
},
{
"epoch": 0.03419951754252038,
"grad_norm": 0.0003121411718893796,
"learning_rate": 2.3784635822138424e-05,
"loss": 46.0,
"step": 280
},
{
"epoch": 0.034810223212922535,
"grad_norm": 0.000346492714015767,
"learning_rate": 2.2001260693120233e-05,
"loss": 46.0,
"step": 285
},
{
"epoch": 0.03542092888332468,
"grad_norm": 0.00036268169060349464,
"learning_rate": 2.026834118478567e-05,
"loss": 46.0,
"step": 290
},
{
"epoch": 0.03603163455372683,
"grad_norm": 0.00044341495959088206,
"learning_rate": 1.858900013521788e-05,
"loss": 46.0,
"step": 295
},
{
"epoch": 0.03664234022412898,
"grad_norm": 0.0003739699022844434,
"learning_rate": 1.6966263830495936e-05,
"loss": 46.0,
"step": 300
},
{
"epoch": 0.03664234022412898,
"eval_loss": 11.5,
"eval_runtime": 22.4161,
"eval_samples_per_second": 153.818,
"eval_steps_per_second": 76.909,
"step": 300
},
{
"epoch": 0.03725304589453113,
"grad_norm": 0.0001925858814502135,
"learning_rate": 1.5403056551122697e-05,
"loss": 46.0,
"step": 305
},
{
"epoch": 0.03786375156493328,
"grad_norm": 0.00032552919583395123,
"learning_rate": 1.3902195302273779e-05,
"loss": 46.0,
"step": 310
},
{
"epoch": 0.03847445723533543,
"grad_norm": 0.00047274224925786257,
"learning_rate": 1.246638473736378e-05,
"loss": 46.0,
"step": 315
},
{
"epoch": 0.03908516290573758,
"grad_norm": 0.00031744991429150105,
"learning_rate": 1.1098212284078036e-05,
"loss": 46.0,
"step": 320
},
{
"epoch": 0.03969586857613973,
"grad_norm": 0.0003673922037705779,
"learning_rate": 9.800143481652979e-06,
"loss": 46.0,
"step": 325
},
{
"epoch": 0.04030657424654188,
"grad_norm": 0.00039610432577319443,
"learning_rate": 8.574517537807897e-06,
"loss": 46.0,
"step": 330
},
{
"epoch": 0.04091727991694403,
"grad_norm": 0.0004075795004609972,
"learning_rate": 7.423543113334436e-06,
"loss": 46.0,
"step": 335
},
{
"epoch": 0.041527985587346175,
"grad_norm": 0.00036381828249432147,
"learning_rate": 6.349294341940593e-06,
"loss": 46.0,
"step": 340
},
{
"epoch": 0.04213869125774833,
"grad_norm": 0.0003588273248169571,
"learning_rate": 5.353707092521582e-06,
"loss": 46.0,
"step": 345
},
{
"epoch": 0.042749396928150475,
"grad_norm": 0.00041598634561523795,
"learning_rate": 4.43857548059321e-06,
"loss": 46.0,
"step": 350
},
{
"epoch": 0.04336010259855263,
"grad_norm": 0.00031224085250869393,
"learning_rate": 3.605548635174533e-06,
"loss": 46.0,
"step": 355
},
{
"epoch": 0.043970808268954775,
"grad_norm": 0.00037712001358158886,
"learning_rate": 2.85612772694579e-06,
"loss": 46.0,
"step": 360
},
{
"epoch": 0.04458151393935693,
"grad_norm": 0.0003957616863772273,
"learning_rate": 2.191663263037458e-06,
"loss": 46.0,
"step": 365
},
{
"epoch": 0.045192219609759075,
"grad_norm": 0.00037767720641568303,
"learning_rate": 1.6133526533250565e-06,
"loss": 46.0,
"step": 370
},
{
"epoch": 0.04580292528016123,
"grad_norm": 0.00033137385617010295,
"learning_rate": 1.1222380526156928e-06,
"loss": 46.0,
"step": 375
},
{
"epoch": 0.046413630950563375,
"grad_norm": 0.00036192245897836983,
"learning_rate": 7.192044826145771e-07,
"loss": 46.0,
"step": 380
},
{
"epoch": 0.04702433662096553,
"grad_norm": 0.0004093432507943362,
"learning_rate": 4.049782370561583e-07,
"loss": 46.0,
"step": 385
},
{
"epoch": 0.047635042291367675,
"grad_norm": 0.000310163595713675,
"learning_rate": 1.8012557287367392e-07,
"loss": 46.0,
"step": 390
},
{
"epoch": 0.04824574796176982,
"grad_norm": 0.0002933391951955855,
"learning_rate": 4.5051689765929214e-08,
"loss": 46.0,
"step": 395
},
{
"epoch": 0.048856453632171976,
"grad_norm": 0.00036240380723029375,
"learning_rate": 0.0,
"loss": 46.0,
"step": 400
},
{
"epoch": 0.048856453632171976,
"eval_loss": 11.5,
"eval_runtime": 22.3644,
"eval_samples_per_second": 154.174,
"eval_steps_per_second": 77.087,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 16197300191232.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
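
A minimal sketch, assuming the JSON above is a standard Hugging Face Trainer trainer_state.json saved locally (the path "trainer_state.json" below is a placeholder), of how the log_history entries could be parsed to inspect the learning-rate schedule, training loss, and the eval records written every eval_steps:

import json

# Load the checkpoint's trainer state (placeholder path, assumed saved from the content above).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>4}  lr {e['learning_rate']:.3e}  "
          f"loss {e['loss']:.1f}  grad_norm {e['grad_norm']:.3e}")

for e in eval_logs:
    print(f"eval @ step {e['step']:>4}  eval_loss {e['eval_loss']:.2f}  "
          f"runtime {e['eval_runtime']:.1f}s")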