1.5B-1000 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.445115810674723,
"eval_steps": 100,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06445115810674723,
"grad_norm": 8.751441955566406,
"learning_rate": 5.154639175257732e-07,
"loss": 8.7908,
"step": 10
},
{
"epoch": 0.12890231621349446,
"grad_norm": 5.980831146240234,
"learning_rate": 1.0309278350515464e-06,
"loss": 8.4297,
"step": 20
},
{
"epoch": 0.1933534743202417,
"grad_norm": 3.555551528930664,
"learning_rate": 1.5463917525773197e-06,
"loss": 7.9847,
"step": 30
},
{
"epoch": 0.2578046324269889,
"grad_norm": 2.1481900215148926,
"learning_rate": 2.061855670103093e-06,
"loss": 7.5452,
"step": 40
},
{
"epoch": 0.32225579053373615,
"grad_norm": 1.3654990196228027,
"learning_rate": 2.577319587628866e-06,
"loss": 7.1644,
"step": 50
},
{
"epoch": 0.3867069486404834,
"grad_norm": 1.294124960899353,
"learning_rate": 3.0927835051546395e-06,
"loss": 6.8552,
"step": 60
},
{
"epoch": 0.4511581067472306,
"grad_norm": 1.3459229469299316,
"learning_rate": 3.6082474226804126e-06,
"loss": 6.5137,
"step": 70
},
{
"epoch": 0.5156092648539778,
"grad_norm": 1.345466136932373,
"learning_rate": 4.123711340206186e-06,
"loss": 6.2217,
"step": 80
},
{
"epoch": 0.5800604229607251,
"grad_norm": 1.0641965866088867,
"learning_rate": 4.639175257731959e-06,
"loss": 5.999,
"step": 90
},
{
"epoch": 0.6445115810674723,
"grad_norm": 1.5514322519302368,
"learning_rate": 5.154639175257732e-06,
"loss": 5.8344,
"step": 100
},
{
"epoch": 0.6445115810674723,
"eval_loss": 5.791144847869873,
"eval_runtime": 20.2064,
"eval_samples_per_second": 79.43,
"eval_steps_per_second": 4.998,
"step": 100
},
{
"epoch": 0.7089627391742196,
"grad_norm": 9.654533386230469,
"learning_rate": 5.670103092783505e-06,
"loss": 5.7513,
"step": 110
},
{
"epoch": 0.7734138972809668,
"grad_norm": 2.767559289932251,
"learning_rate": 6.185567010309279e-06,
"loss": 5.6479,
"step": 120
},
{
"epoch": 0.837865055387714,
"grad_norm": 1.2786972522735596,
"learning_rate": 6.701030927835052e-06,
"loss": 5.5542,
"step": 130
},
{
"epoch": 0.9023162134944612,
"grad_norm": 1.3562631607055664,
"learning_rate": 7.216494845360825e-06,
"loss": 5.4406,
"step": 140
},
{
"epoch": 0.9667673716012085,
"grad_norm": 3.51896333694458,
"learning_rate": 7.731958762886599e-06,
"loss": 5.323,
"step": 150
},
{
"epoch": 1.0312185297079557,
"grad_norm": 1.808701753616333,
"learning_rate": 8.247422680412371e-06,
"loss": 5.6701,
"step": 160
},
{
"epoch": 1.095669687814703,
"grad_norm": 3.2831215858459473,
"learning_rate": 8.762886597938146e-06,
"loss": 5.1268,
"step": 170
},
{
"epoch": 1.1601208459214503,
"grad_norm": 2.606133460998535,
"learning_rate": 9.278350515463918e-06,
"loss": 5.0758,
"step": 180
},
{
"epoch": 1.2245720040281973,
"grad_norm": 1.7396525144577026,
"learning_rate": 9.793814432989691e-06,
"loss": 5.0134,
"step": 190
},
{
"epoch": 1.2890231621349446,
"grad_norm": 0.7748392820358276,
"learning_rate": 1.0309278350515464e-05,
"loss": 4.9703,
"step": 200
},
{
"epoch": 1.2890231621349446,
"eval_loss": 4.946072578430176,
"eval_runtime": 19.5278,
"eval_samples_per_second": 82.19,
"eval_steps_per_second": 5.172,
"step": 200
},
{
"epoch": 1.353474320241692,
"grad_norm": 2.377145290374756,
"learning_rate": 1.0824742268041238e-05,
"loss": 4.874,
"step": 210
},
{
"epoch": 1.417925478348439,
"grad_norm": 1.2470804452896118,
"learning_rate": 1.134020618556701e-05,
"loss": 4.8473,
"step": 220
},
{
"epoch": 1.4823766364551862,
"grad_norm": 2.26425838470459,
"learning_rate": 1.1855670103092785e-05,
"loss": 4.7739,
"step": 230
},
{
"epoch": 1.5468277945619335,
"grad_norm": 1.238208532333374,
"learning_rate": 1.2371134020618558e-05,
"loss": 4.7524,
"step": 240
},
{
"epoch": 1.6112789526686808,
"grad_norm": 0.9819815158843994,
"learning_rate": 1.2886597938144332e-05,
"loss": 4.6977,
"step": 250
},
{
"epoch": 1.675730110775428,
"grad_norm": 1.37124502658844,
"learning_rate": 1.3402061855670103e-05,
"loss": 4.5812,
"step": 260
},
{
"epoch": 1.7401812688821754,
"grad_norm": 1.0728330612182617,
"learning_rate": 1.3917525773195878e-05,
"loss": 4.5445,
"step": 270
},
{
"epoch": 1.8046324269889225,
"grad_norm": 1.4190095663070679,
"learning_rate": 1.443298969072165e-05,
"loss": 4.4797,
"step": 280
},
{
"epoch": 1.8690835850956697,
"grad_norm": 1.3823643922805786,
"learning_rate": 1.4948453608247425e-05,
"loss": 4.3833,
"step": 290
},
{
"epoch": 1.9335347432024168,
"grad_norm": 1.6940747499465942,
"learning_rate": 1.5463917525773197e-05,
"loss": 4.2822,
"step": 300
},
{
"epoch": 1.9335347432024168,
"eval_loss": 4.213192462921143,
"eval_runtime": 19.7585,
"eval_samples_per_second": 81.231,
"eval_steps_per_second": 5.112,
"step": 300
},
{
"epoch": 1.997985901309164,
"grad_norm": 1.6004289388656616,
"learning_rate": 1.597938144329897e-05,
"loss": 4.1608,
"step": 310
},
{
"epoch": 2.0624370594159114,
"grad_norm": 2.5717203617095947,
"learning_rate": 1.6494845360824743e-05,
"loss": 4.392,
"step": 320
},
{
"epoch": 2.1268882175226587,
"grad_norm": 1.9171743392944336,
"learning_rate": 1.7010309278350517e-05,
"loss": 3.9273,
"step": 330
},
{
"epoch": 2.191339375629406,
"grad_norm": 2.1511971950531006,
"learning_rate": 1.752577319587629e-05,
"loss": 3.8527,
"step": 340
},
{
"epoch": 2.2557905337361532,
"grad_norm": 1.949204683303833,
"learning_rate": 1.8041237113402062e-05,
"loss": 3.7843,
"step": 350
},
{
"epoch": 2.3202416918429005,
"grad_norm": 1.6854971647262573,
"learning_rate": 1.8556701030927837e-05,
"loss": 3.7476,
"step": 360
},
{
"epoch": 2.3846928499496474,
"grad_norm": 1.9099489450454712,
"learning_rate": 1.907216494845361e-05,
"loss": 3.6868,
"step": 370
},
{
"epoch": 2.4491440080563947,
"grad_norm": 1.9068998098373413,
"learning_rate": 1.9587628865979382e-05,
"loss": 3.6684,
"step": 380
},
{
"epoch": 2.513595166163142,
"grad_norm": 1.3064167499542236,
"learning_rate": 1.9999996358015542e-05,
"loss": 3.6028,
"step": 390
},
{
"epoch": 2.5780463242698892,
"grad_norm": 1.7267900705337524,
"learning_rate": 1.9999868888837957e-05,
"loss": 3.5599,
"step": 400
},
{
"epoch": 2.5780463242698892,
"eval_loss": 3.5509181022644043,
"eval_runtime": 20.1957,
"eval_samples_per_second": 79.472,
"eval_steps_per_second": 5.001,
"step": 400
},
{
"epoch": 2.6424974823766365,
"grad_norm": 2.2463932037353516,
"learning_rate": 1.9999559323090132e-05,
"loss": 3.5059,
"step": 410
},
{
"epoch": 2.706948640483384,
"grad_norm": 2.046381950378418,
"learning_rate": 1.9999067666409225e-05,
"loss": 3.4585,
"step": 420
},
{
"epoch": 2.771399798590131,
"grad_norm": 1.8293402194976807,
"learning_rate": 1.9998393927748257e-05,
"loss": 3.4393,
"step": 430
},
{
"epoch": 2.835850956696878,
"grad_norm": 1.701651930809021,
"learning_rate": 1.9997538119375938e-05,
"loss": 3.3986,
"step": 440
},
{
"epoch": 2.900302114803625,
"grad_norm": 1.8549060821533203,
"learning_rate": 1.9996500256876447e-05,
"loss": 3.3568,
"step": 450
},
{
"epoch": 2.9647532729103725,
"grad_norm": 2.38415789604187,
"learning_rate": 1.999528035914915e-05,
"loss": 3.3115,
"step": 460
},
{
"epoch": 3.02920443101712,
"grad_norm": 3.2878332138061523,
"learning_rate": 1.9993878448408263e-05,
"loss": 3.5941,
"step": 470
},
{
"epoch": 3.093655589123867,
"grad_norm": 2.5062038898468018,
"learning_rate": 1.999229455018243e-05,
"loss": 3.2588,
"step": 480
},
{
"epoch": 3.1581067472306144,
"grad_norm": 2.2001876831054688,
"learning_rate": 1.9990528693314273e-05,
"loss": 3.2381,
"step": 490
},
{
"epoch": 3.2225579053373616,
"grad_norm": 2.3333001136779785,
"learning_rate": 1.9988580909959864e-05,
"loss": 3.2271,
"step": 500
},
{
"epoch": 3.2225579053373616,
"eval_loss": 3.236539840698242,
"eval_runtime": 19.5719,
"eval_samples_per_second": 82.005,
"eval_steps_per_second": 5.16,
"step": 500
},
{
"epoch": 3.287009063444109,
"grad_norm": 2.109981060028076,
"learning_rate": 1.9986451235588135e-05,
"loss": 3.2038,
"step": 510
},
{
"epoch": 3.351460221550856,
"grad_norm": 2.376877546310425,
"learning_rate": 1.9984139708980228e-05,
"loss": 3.1678,
"step": 520
},
{
"epoch": 3.415911379657603,
"grad_norm": 2.0894505977630615,
"learning_rate": 1.9981646372228813e-05,
"loss": 3.1701,
"step": 530
},
{
"epoch": 3.4803625377643503,
"grad_norm": 1.9505690336227417,
"learning_rate": 1.997897127073728e-05,
"loss": 3.1902,
"step": 540
},
{
"epoch": 3.5448136958710976,
"grad_norm": 3.200566530227661,
"learning_rate": 1.997611445321896e-05,
"loss": 3.1286,
"step": 550
},
{
"epoch": 3.609264853977845,
"grad_norm": 2.295381784439087,
"learning_rate": 1.9973075971696195e-05,
"loss": 3.1308,
"step": 560
},
{
"epoch": 3.673716012084592,
"grad_norm": 2.323788642883301,
"learning_rate": 1.9969855881499413e-05,
"loss": 3.1093,
"step": 570
},
{
"epoch": 3.7381671701913395,
"grad_norm": 1.9054023027420044,
"learning_rate": 1.996645424126613e-05,
"loss": 3.1029,
"step": 580
},
{
"epoch": 3.8026183282980868,
"grad_norm": 2.854268789291382,
"learning_rate": 1.996287111293986e-05,
"loss": 3.0843,
"step": 590
},
{
"epoch": 3.8670694864048336,
"grad_norm": 2.066882848739624,
"learning_rate": 1.9959106561768988e-05,
"loss": 3.0301,
"step": 600
},
{
"epoch": 3.8670694864048336,
"eval_loss": 3.056886672973633,
"eval_runtime": 20.2056,
"eval_samples_per_second": 79.433,
"eval_steps_per_second": 4.999,
"step": 600
},
{
"epoch": 3.931520644511581,
"grad_norm": 2.220766544342041,
"learning_rate": 1.9955160656305606e-05,
"loss": 3.02,
"step": 610
},
{
"epoch": 3.995971802618328,
"grad_norm": 2.461122751235962,
"learning_rate": 1.995103346840424e-05,
"loss": 3.0121,
"step": 620
},
{
"epoch": 4.0604229607250755,
"grad_norm": 1.9937182664871216,
"learning_rate": 1.9946725073220542e-05,
"loss": 3.249,
"step": 630
},
{
"epoch": 4.124874118831823,
"grad_norm": 2.3354651927948,
"learning_rate": 1.9942235549209955e-05,
"loss": 2.9879,
"step": 640
},
{
"epoch": 4.18932527693857,
"grad_norm": 2.059208393096924,
"learning_rate": 1.9937564978126233e-05,
"loss": 2.987,
"step": 650
},
{
"epoch": 4.253776435045317,
"grad_norm": 2.804398775100708,
"learning_rate": 1.9932713445019993e-05,
"loss": 2.9377,
"step": 660
},
{
"epoch": 4.318227593152065,
"grad_norm": 2.1567623615264893,
"learning_rate": 1.992768103823714e-05,
"loss": 2.9478,
"step": 670
},
{
"epoch": 4.382678751258812,
"grad_norm": 2.021939992904663,
"learning_rate": 1.9922467849417288e-05,
"loss": 2.9119,
"step": 680
},
{
"epoch": 4.447129909365559,
"grad_norm": 1.5279889106750488,
"learning_rate": 1.9917073973492055e-05,
"loss": 2.9033,
"step": 690
},
{
"epoch": 4.5115810674723065,
"grad_norm": 1.7887712717056274,
"learning_rate": 1.991149950868336e-05,
"loss": 2.8944,
"step": 700
},
{
"epoch": 4.5115810674723065,
"eval_loss": 2.8965914249420166,
"eval_runtime": 21.0307,
"eval_samples_per_second": 76.317,
"eval_steps_per_second": 4.802,
"step": 700
},
{
"epoch": 4.576032225579054,
"grad_norm": 1.7073420286178589,
"learning_rate": 1.9905744556501627e-05,
"loss": 2.8471,
"step": 710
},
{
"epoch": 4.640483383685801,
"grad_norm": 1.862641453742981,
"learning_rate": 1.989980922174394e-05,
"loss": 2.8432,
"step": 720
},
{
"epoch": 4.704934541792548,
"grad_norm": 1.9594634771347046,
"learning_rate": 1.9893693612492116e-05,
"loss": 2.8482,
"step": 730
},
{
"epoch": 4.769385699899295,
"grad_norm": 1.6772149801254272,
"learning_rate": 1.988739784011077e-05,
"loss": 2.8538,
"step": 740
},
{
"epoch": 4.833836858006042,
"grad_norm": 1.9590495824813843,
"learning_rate": 1.9880922019245258e-05,
"loss": 2.8703,
"step": 750
},
{
"epoch": 4.898288016112789,
"grad_norm": 1.4671125411987305,
"learning_rate": 1.9874266267819604e-05,
"loss": 2.8078,
"step": 760
},
{
"epoch": 4.962739174219537,
"grad_norm": 1.8790684938430786,
"learning_rate": 1.986743070703435e-05,
"loss": 2.779,
"step": 770
},
{
"epoch": 5.027190332326284,
"grad_norm": 1.6177160739898682,
"learning_rate": 1.9860415461364343e-05,
"loss": 3.0088,
"step": 780
},
{
"epoch": 5.091641490433031,
"grad_norm": 1.774277687072754,
"learning_rate": 1.9853220658556474e-05,
"loss": 2.7841,
"step": 790
},
{
"epoch": 5.1560926485397784,
"grad_norm": 1.4572412967681885,
"learning_rate": 1.984584642962735e-05,
"loss": 2.7748,
"step": 800
},
{
"epoch": 5.1560926485397784,
"eval_loss": 2.7890875339508057,
"eval_runtime": 19.598,
"eval_samples_per_second": 81.896,
"eval_steps_per_second": 5.154,
"step": 800
},
{
"epoch": 5.220543806646526,
"grad_norm": 1.4425498247146606,
"learning_rate": 1.9838292908860922e-05,
"loss": 2.7712,
"step": 810
},
{
"epoch": 5.284994964753273,
"grad_norm": 1.4506257772445679,
"learning_rate": 1.9830560233806006e-05,
"loss": 2.758,
"step": 820
},
{
"epoch": 5.34944612286002,
"grad_norm": 1.3948203325271606,
"learning_rate": 1.982264854527381e-05,
"loss": 2.7487,
"step": 830
},
{
"epoch": 5.413897280966768,
"grad_norm": 1.170486330986023,
"learning_rate": 1.9814557987335363e-05,
"loss": 2.7558,
"step": 840
},
{
"epoch": 5.478348439073515,
"grad_norm": 1.3575382232666016,
"learning_rate": 1.980628870731888e-05,
"loss": 2.7266,
"step": 850
},
{
"epoch": 5.542799597180262,
"grad_norm": 1.3390381336212158,
"learning_rate": 1.979784085580708e-05,
"loss": 2.7156,
"step": 860
},
{
"epoch": 5.6072507552870094,
"grad_norm": 1.3795325756072998,
"learning_rate": 1.978921458663447e-05,
"loss": 2.7069,
"step": 870
},
{
"epoch": 5.671701913393756,
"grad_norm": 1.2567253112792969,
"learning_rate": 1.9780410056884505e-05,
"loss": 2.7121,
"step": 880
},
{
"epoch": 5.736153071500503,
"grad_norm": 1.6211382150650024,
"learning_rate": 1.977142742688676e-05,
"loss": 2.7006,
"step": 890
},
{
"epoch": 5.80060422960725,
"grad_norm": 1.5447043180465698,
"learning_rate": 1.9762266860213982e-05,
"loss": 2.6635,
"step": 900
},
{
"epoch": 5.80060422960725,
"eval_loss": 2.704418659210205,
"eval_runtime": 20.2115,
"eval_samples_per_second": 79.41,
"eval_steps_per_second": 4.997,
"step": 900
},
{
"epoch": 5.865055387713998,
"grad_norm": 1.28032648563385,
"learning_rate": 1.9752928523679145e-05,
"loss": 2.7062,
"step": 910
},
{
"epoch": 5.929506545820745,
"grad_norm": 1.323886752128601,
"learning_rate": 1.974341258733238e-05,
"loss": 2.6955,
"step": 920
},
{
"epoch": 5.993957703927492,
"grad_norm": 1.370737910270691,
"learning_rate": 1.9733719224457896e-05,
"loss": 2.6607,
"step": 930
},
{
"epoch": 6.05840886203424,
"grad_norm": 1.2304973602294922,
"learning_rate": 1.972384861157082e-05,
"loss": 2.8645,
"step": 940
},
{
"epoch": 6.122860020140987,
"grad_norm": 1.251336693763733,
"learning_rate": 1.9713800928413987e-05,
"loss": 2.678,
"step": 950
},
{
"epoch": 6.187311178247734,
"grad_norm": 1.4331055879592896,
"learning_rate": 1.9703576357954653e-05,
"loss": 2.6339,
"step": 960
},
{
"epoch": 6.251762336354481,
"grad_norm": 1.4404268264770508,
"learning_rate": 1.969317508638119e-05,
"loss": 2.6497,
"step": 970
},
{
"epoch": 6.316213494461229,
"grad_norm": 1.2648017406463623,
"learning_rate": 1.9682597303099663e-05,
"loss": 2.6194,
"step": 980
},
{
"epoch": 6.380664652567976,
"grad_norm": 1.464142084121704,
"learning_rate": 1.9671843200730408e-05,
"loss": 2.6342,
"step": 990
},
{
"epoch": 6.445115810674723,
"grad_norm": 1.4572694301605225,
"learning_rate": 1.96609129751045e-05,
"loss": 2.6102,
"step": 1000
},
{
"epoch": 6.445115810674723,
"eval_loss": 2.6344494819641113,
"eval_runtime": 19.7801,
"eval_samples_per_second": 81.142,
"eval_steps_per_second": 5.106,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 7750,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.2975800261987533e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}