{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9992659652556888,
"eval_steps": 500,
"global_step": 1021,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009787129924149743,
"grad_norm": 90.15490074334751,
"learning_rate": 4.854368932038835e-07,
"loss": 1.5489,
"step": 10
},
{
"epoch": 0.019574259848299486,
"grad_norm": 58.23453270743763,
"learning_rate": 9.70873786407767e-07,
"loss": 1.4928,
"step": 20
},
{
"epoch": 0.029361389772449228,
"grad_norm": 16.767612840452053,
"learning_rate": 1.4563106796116506e-06,
"loss": 1.3729,
"step": 30
},
{
"epoch": 0.03914851969659897,
"grad_norm": 11.079321586087117,
"learning_rate": 1.941747572815534e-06,
"loss": 1.2475,
"step": 40
},
{
"epoch": 0.048935649620748714,
"grad_norm": 6.953520974579646,
"learning_rate": 2.427184466019418e-06,
"loss": 1.1889,
"step": 50
},
{
"epoch": 0.058722779544898455,
"grad_norm": 3.390499745911967,
"learning_rate": 2.912621359223301e-06,
"loss": 1.1427,
"step": 60
},
{
"epoch": 0.0685099094690482,
"grad_norm": 2.234261881443784,
"learning_rate": 3.398058252427185e-06,
"loss": 1.0896,
"step": 70
},
{
"epoch": 0.07829703939319795,
"grad_norm": 2.5807948782523944,
"learning_rate": 3.883495145631068e-06,
"loss": 1.104,
"step": 80
},
{
"epoch": 0.0880841693173477,
"grad_norm": 2.356465079616129,
"learning_rate": 4.368932038834952e-06,
"loss": 1.0746,
"step": 90
},
{
"epoch": 0.09787129924149743,
"grad_norm": 2.4234601720621023,
"learning_rate": 4.854368932038836e-06,
"loss": 1.0793,
"step": 100
},
{
"epoch": 0.10765842916564718,
"grad_norm": 2.117132974644545,
"learning_rate": 4.999282701856139e-06,
"loss": 1.0692,
"step": 110
},
{
"epoch": 0.11744555908979691,
"grad_norm": 1.8840717844600037,
"learning_rate": 4.995770395678171e-06,
"loss": 1.0731,
"step": 120
},
{
"epoch": 0.12723268901394666,
"grad_norm": 2.5216433561534317,
"learning_rate": 4.989335440737587e-06,
"loss": 1.0464,
"step": 130
},
{
"epoch": 0.1370198189380964,
"grad_norm": 2.4579666246587424,
"learning_rate": 4.979985372628657e-06,
"loss": 1.0434,
"step": 140
},
{
"epoch": 0.14680694886224616,
"grad_norm": 2.3287908973842755,
"learning_rate": 4.967731140661878e-06,
"loss": 1.0441,
"step": 150
},
{
"epoch": 0.1565940787863959,
"grad_norm": 2.0436369039725224,
"learning_rate": 4.952587095041882e-06,
"loss": 1.0386,
"step": 160
},
{
"epoch": 0.16638120871054562,
"grad_norm": 2.5733520388523945,
"learning_rate": 4.934570970062765e-06,
"loss": 1.0565,
"step": 170
},
{
"epoch": 0.1761683386346954,
"grad_norm": 1.8256543734485644,
"learning_rate": 4.913703863340504e-06,
"loss": 1.0441,
"step": 180
},
{
"epoch": 0.18595546855884512,
"grad_norm": 1.7844329038469422,
"learning_rate": 4.890010211106795e-06,
"loss": 1.0396,
"step": 190
},
{
"epoch": 0.19574259848299486,
"grad_norm": 3.2695230626975613,
"learning_rate": 4.86351775959322e-06,
"loss": 1.0295,
"step": 200
},
{
"epoch": 0.20552972840714462,
"grad_norm": 2.6115311678055653,
"learning_rate": 4.834257532539292e-06,
"loss": 1.0465,
"step": 210
},
{
"epoch": 0.21531685833129435,
"grad_norm": 2.422852080700085,
"learning_rate": 4.802263794862385e-06,
"loss": 1.017,
"step": 220
},
{
"epoch": 0.2251039882554441,
"grad_norm": 2.2578865515295283,
"learning_rate": 4.767574012532121e-06,
"loss": 1.0176,
"step": 230
},
{
"epoch": 0.23489111817959382,
"grad_norm": 2.087619836151659,
"learning_rate": 4.730228808696201e-06,
"loss": 1.0174,
"step": 240
},
{
"epoch": 0.24467824810374358,
"grad_norm": 1.999324934826496,
"learning_rate": 4.690271916109034e-06,
"loss": 1.0174,
"step": 250
},
{
"epoch": 0.2544653780278933,
"grad_norm": 3.372686449261057,
"learning_rate": 4.647750125918909e-06,
"loss": 1.0168,
"step": 260
},
{
"epoch": 0.2642525079520431,
"grad_norm": 2.5211594090463088,
"learning_rate": 4.6027132328736515e-06,
"loss": 1.0351,
"step": 270
},
{
"epoch": 0.2740396378761928,
"grad_norm": 3.6300783567036525,
"learning_rate": 4.555213977008946e-06,
"loss": 1.0105,
"step": 280
},
{
"epoch": 0.28382676780034255,
"grad_norm": 1.6572768717333426,
"learning_rate": 4.50530798188761e-06,
"loss": 1.0227,
"step": 290
},
{
"epoch": 0.2936138977244923,
"grad_norm": 2.021540908150653,
"learning_rate": 4.453053689462131e-06,
"loss": 1.0178,
"step": 300
},
{
"epoch": 0.303401027648642,
"grad_norm": 1.8651477439042456,
"learning_rate": 4.398512291636768e-06,
"loss": 1.0091,
"step": 310
},
{
"epoch": 0.3131881575727918,
"grad_norm": 2.43880182053301,
"learning_rate": 4.341747658609332e-06,
"loss": 1.0196,
"step": 320
},
{
"epoch": 0.32297528749694154,
"grad_norm": 2.8064493559277963,
"learning_rate": 4.282826264076587e-06,
"loss": 1.0142,
"step": 330
},
{
"epoch": 0.33276241742109125,
"grad_norm": 1.9574404119807658,
"learning_rate": 4.221817107390847e-06,
"loss": 1.0014,
"step": 340
},
{
"epoch": 0.342549547345241,
"grad_norm": 2.163660779817407,
"learning_rate": 4.15879163275892e-06,
"loss": 1.0297,
"step": 350
},
{
"epoch": 0.3523366772693908,
"grad_norm": 1.9653596257387018,
"learning_rate": 4.093823645578037e-06,
"loss": 1.0155,
"step": 360
},
{
"epoch": 0.3621238071935405,
"grad_norm": 2.2959542048619253,
"learning_rate": 4.02698922600672e-06,
"loss": 1.0136,
"step": 370
},
{
"epoch": 0.37191093711769024,
"grad_norm": 1.7061830149355224,
"learning_rate": 3.958366639871826e-06,
"loss": 0.9889,
"step": 380
},
{
"epoch": 0.38169806704184,
"grad_norm": 2.299910460880925,
"learning_rate": 3.888036247016073e-06,
"loss": 0.9959,
"step": 390
},
{
"epoch": 0.3914851969659897,
"grad_norm": 1.9452961119067358,
"learning_rate": 3.81608040719339e-06,
"loss": 1.0049,
"step": 400
},
{
"epoch": 0.4012723268901395,
"grad_norm": 2.744356349117842,
"learning_rate": 3.7425833836222947e-06,
"loss": 1.0018,
"step": 410
},
{
"epoch": 0.41105945681428924,
"grad_norm": 2.477879387395352,
"learning_rate": 3.6676312443102323e-06,
"loss": 1.0152,
"step": 420
},
{
"epoch": 0.42084658673843894,
"grad_norm": 2.1636493383526676,
"learning_rate": 3.591311761264433e-06,
"loss": 1.0056,
"step": 430
},
{
"epoch": 0.4306337166625887,
"grad_norm": 2.743033130906874,
"learning_rate": 3.513714307707321e-06,
"loss": 0.9988,
"step": 440
},
{
"epoch": 0.4404208465867384,
"grad_norm": 2.3096838919036093,
"learning_rate": 3.4349297534168242e-06,
"loss": 0.9973,
"step": 450
},
{
"epoch": 0.4502079765108882,
"grad_norm": 2.7080342141867586,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.9841,
"step": 460
},
{
"epoch": 0.45999510643503794,
"grad_norm": 1.8384006858146424,
"learning_rate": 3.274169664423768e-06,
"loss": 0.9991,
"step": 470
},
{
"epoch": 0.46978223635918764,
"grad_norm": 1.7788391832635966,
"learning_rate": 3.192382386331667e-06,
"loss": 0.9992,
"step": 480
},
{
"epoch": 0.4795693662833374,
"grad_norm": 1.9349340294062531,
"learning_rate": 3.109784300270943e-06,
"loss": 1.004,
"step": 490
},
{
"epoch": 0.48935649620748717,
"grad_norm": 2.089365671821631,
"learning_rate": 3.0264721319638176e-06,
"loss": 0.9892,
"step": 500
},
{
"epoch": 0.4991436261316369,
"grad_norm": 1.6583587633830374,
"learning_rate": 2.9425434433518985e-06,
"loss": 0.9991,
"step": 510
},
{
"epoch": 0.5089307560557866,
"grad_norm": 2.164226641765232,
"learning_rate": 2.8580965183471794e-06,
"loss": 0.9946,
"step": 520
},
{
"epoch": 0.5187178859799364,
"grad_norm": 2.595883151442786,
"learning_rate": 2.773230247737569e-06,
"loss": 1.0007,
"step": 530
},
{
"epoch": 0.5285050159040862,
"grad_norm": 1.9135033210807688,
"learning_rate": 2.6880440133817563e-06,
"loss": 0.997,
"step": 540
},
{
"epoch": 0.5382921458282359,
"grad_norm": 2.1997366611393776,
"learning_rate": 2.602637571829009e-06,
"loss": 0.9916,
"step": 550
},
{
"epoch": 0.5480792757523856,
"grad_norm": 4.036295629554255,
"learning_rate": 2.517110937500185e-06,
"loss": 0.9921,
"step": 560
},
{
"epoch": 0.5578664056765353,
"grad_norm": 1.9466096395859314,
"learning_rate": 2.431564265566781e-06,
"loss": 0.9998,
"step": 570
},
{
"epoch": 0.5676535356006851,
"grad_norm": 2.487288664151329,
"learning_rate": 2.346097734665143e-06,
"loss": 0.9871,
"step": 580
},
{
"epoch": 0.5774406655248349,
"grad_norm": 1.816928462189231,
"learning_rate": 2.2608114295832053e-06,
"loss": 0.9979,
"step": 590
},
{
"epoch": 0.5872277954489846,
"grad_norm": 1.75653406126234,
"learning_rate": 2.175805224057129e-06,
"loss": 0.9724,
"step": 600
},
{
"epoch": 0.5970149253731343,
"grad_norm": 1.7976791598972546,
"learning_rate": 2.0911786638150873e-06,
"loss": 0.9937,
"step": 610
},
{
"epoch": 0.606802055297284,
"grad_norm": 1.9243691484831966,
"learning_rate": 2.0070308500051715e-06,
"loss": 0.9859,
"step": 620
},
{
"epoch": 0.6165891852214338,
"grad_norm": 2.7481202060087835,
"learning_rate": 1.9234603231439e-06,
"loss": 0.9841,
"step": 630
},
{
"epoch": 0.6263763151455836,
"grad_norm": 2.003686173024267,
"learning_rate": 1.84056494772127e-06,
"loss": 0.9749,
"step": 640
},
{
"epoch": 0.6361634450697333,
"grad_norm": 1.6161005997468445,
"learning_rate": 1.7584417975974535e-06,
"loss": 0.985,
"step": 650
},
{
"epoch": 0.6459505749938831,
"grad_norm": 1.5994212448277128,
"learning_rate": 1.6771870423253473e-06,
"loss": 0.9823,
"step": 660
},
{
"epoch": 0.6557377049180327,
"grad_norm": 2.947926766692084,
"learning_rate": 1.5968958345321178e-06,
"loss": 0.9905,
"step": 670
},
{
"epoch": 0.6655248348421825,
"grad_norm": 2.496926900199072,
"learning_rate": 1.517662198491599e-06,
"loss": 0.9802,
"step": 680
},
{
"epoch": 0.6753119647663323,
"grad_norm": 2.50313272426719,
"learning_rate": 1.4395789200180343e-06,
"loss": 0.9839,
"step": 690
},
{
"epoch": 0.685099094690482,
"grad_norm": 3.162725883476327,
"learning_rate": 1.362737437810114e-06,
"loss": 0.9746,
"step": 700
},
{
"epoch": 0.6948862246146318,
"grad_norm": 2.5094677251656696,
"learning_rate": 1.287227736372538e-06,
"loss": 0.9662,
"step": 710
},
{
"epoch": 0.7046733545387815,
"grad_norm": 1.7194609557385765,
"learning_rate": 1.2131382406404866e-06,
"loss": 0.9654,
"step": 720
},
{
"epoch": 0.7144604844629312,
"grad_norm": 2.088741834771206,
"learning_rate": 1.1405557124304338e-06,
"loss": 0.9687,
"step": 730
},
{
"epoch": 0.724247614387081,
"grad_norm": 2.3589729813581637,
"learning_rate": 1.0695651488385168e-06,
"loss": 0.9764,
"step": 740
},
{
"epoch": 0.7340347443112307,
"grad_norm": 1.4080735212450242,
"learning_rate": 1.0002496827054806e-06,
"loss": 0.9734,
"step": 750
},
{
"epoch": 0.7438218742353805,
"grad_norm": 2.028266231817523,
"learning_rate": 9.326904852647345e-07,
"loss": 0.9731,
"step": 760
},
{
"epoch": 0.7536090041595302,
"grad_norm": 1.9325878087268091,
"learning_rate": 8.669666710875319e-07,
"loss": 0.9717,
"step": 770
},
{
"epoch": 0.76339613408368,
"grad_norm": 2.6471850221409583,
"learning_rate": 8.031552054365905e-07,
"loss": 0.9721,
"step": 780
},
{
"epoch": 0.7731832640078297,
"grad_norm": 1.8267904470848466,
"learning_rate": 7.413308141366254e-07,
"loss": 0.9925,
"step": 790
},
{
"epoch": 0.7829703939319794,
"grad_norm": 1.7349835377240377,
"learning_rate": 6.815658960673782e-07,
"loss": 0.9581,
"step": 800
},
{
"epoch": 0.7927575238561292,
"grad_norm": 2.037859066803747,
"learning_rate": 6.239304383815706e-07,
"loss": 0.9699,
"step": 810
},
{
"epoch": 0.802544653780279,
"grad_norm": 2.1130579933367257,
"learning_rate": 5.684919345471029e-07,
"loss": 0.9905,
"step": 820
},
{
"epoch": 0.8123317837044287,
"grad_norm": 1.946465156584691,
"learning_rate": 5.15315305309455e-07,
"loss": 0.973,
"step": 830
},
{
"epoch": 0.8221189136285785,
"grad_norm": 2.5768060337551972,
"learning_rate": 4.644628226668485e-07,
"loss": 0.9582,
"step": 840
},
{
"epoch": 0.8319060435527281,
"grad_norm": 2.173860689008899,
"learning_rate": 4.159940369472015e-07,
"loss": 0.9729,
"step": 850
},
{
"epoch": 0.8416931734768779,
"grad_norm": 2.291543898620316,
"learning_rate": 3.699657070722698e-07,
"loss": 0.9761,
"step": 860
},
{
"epoch": 0.8514803034010276,
"grad_norm": 2.088024718750774,
"learning_rate": 3.2643173409063976e-07,
"loss": 0.9818,
"step": 870
},
{
"epoch": 0.8612674333251774,
"grad_norm": 1.8060057642166407,
"learning_rate": 2.854430980574002e-07,
"loss": 0.9636,
"step": 880
},
{
"epoch": 0.8710545632493272,
"grad_norm": 1.749866063660457,
"learning_rate": 2.4704779833442993e-07,
"loss": 0.9636,
"step": 890
},
{
"epoch": 0.8808416931734768,
"grad_norm": 2.0741463604546913,
"learning_rate": 2.1129079738118424e-07,
"loss": 0.963,
"step": 900
},
{
"epoch": 0.8906288230976266,
"grad_norm": 1.4388364613303446,
"learning_rate": 1.782139681018244e-07,
"loss": 0.9757,
"step": 910
},
{
"epoch": 0.9004159530217763,
"grad_norm": 2.3114929534129343,
"learning_rate": 1.4785604481034639e-07,
"loss": 0.9678,
"step": 920
},
{
"epoch": 0.9102030829459261,
"grad_norm": 2.0415525131838432,
"learning_rate": 1.202525778711172e-07,
"loss": 0.9595,
"step": 930
},
{
"epoch": 0.9199902128700759,
"grad_norm": 2.3293806080394104,
"learning_rate": 9.54358920679524e-08,
"loss": 0.98,
"step": 940
},
{
"epoch": 0.9297773427942256,
"grad_norm": 2.0830083897095406,
"learning_rate": 7.343504875047814e-08,
"loss": 0.964,
"step": 950
},
{
"epoch": 0.9395644727183753,
"grad_norm": 2.218326205399271,
"learning_rate": 5.427581180210639e-08,
"loss": 0.979,
"step": 960
},
{
"epoch": 0.949351602642525,
"grad_norm": 2.153253403337433,
"learning_rate": 3.798061746947995e-08,
"loss": 0.9578,
"step": 970
},
{
"epoch": 0.9591387325666748,
"grad_norm": 1.611398342662154,
"learning_rate": 2.456854808871201e-08,
"loss": 0.9759,
"step": 980
},
{
"epoch": 0.9689258624908246,
"grad_norm": 2.2214603407537883,
"learning_rate": 1.4055309739195166e-08,
"loss": 0.9691,
"step": 990
},
{
"epoch": 0.9787129924149743,
"grad_norm": 1.8872382897952824,
"learning_rate": 6.453213851142226e-09,
"loss": 0.9866,
"step": 1000
},
{
"epoch": 0.9885001223391241,
"grad_norm": 3.466854317296078,
"learning_rate": 1.7711627883998383e-09,
"loss": 0.9735,
"step": 1010
},
{
"epoch": 0.9982872522632737,
"grad_norm": 1.8136326675400594,
"learning_rate": 1.463942341850544e-11,
"loss": 0.9689,
"step": 1020
},
{
"epoch": 0.9992659652556888,
"step": 1021,
"total_flos": 1.136081069573931e+18,
"train_loss": 1.019411426333092,
"train_runtime": 37962.3304,
"train_samples_per_second": 3.444,
"train_steps_per_second": 0.027
}
],
"logging_steps": 10,
"max_steps": 1021,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.136081069573931e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}