bangla_llama_7b_instruct / trainer_state.json
{
"best_metric": 1.2448393106460571,
"best_model_checkpoint": "./lora_bn/checkpoint-1400",
"epoch": 0.9003215434083601,
"eval_steps": 200,
"global_step": 1400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006430868167202572,
"grad_norm": 1.3939015865325928,
"learning_rate": 2.9999999999999997e-05,
"loss": 3.6079,
"step": 10
},
{
"epoch": 0.012861736334405145,
"grad_norm": 1.4347032308578491,
"learning_rate": 5.9999999999999995e-05,
"loss": 3.3531,
"step": 20
},
{
"epoch": 0.01929260450160772,
"grad_norm": 1.10429847240448,
"learning_rate": 8.999999999999999e-05,
"loss": 2.7805,
"step": 30
},
{
"epoch": 0.02572347266881029,
"grad_norm": 0.6873103976249695,
"learning_rate": 0.00011999999999999999,
"loss": 2.3846,
"step": 40
},
{
"epoch": 0.03215434083601286,
"grad_norm": 0.749026894569397,
"learning_rate": 0.00015,
"loss": 2.2196,
"step": 50
},
{
"epoch": 0.03858520900321544,
"grad_norm": 0.6276043057441711,
"learning_rate": 0.00017999999999999998,
"loss": 2.0483,
"step": 60
},
{
"epoch": 0.04501607717041801,
"grad_norm": 0.7210396528244019,
"learning_rate": 0.00020999999999999998,
"loss": 1.8821,
"step": 70
},
{
"epoch": 0.05144694533762058,
"grad_norm": 0.6538994312286377,
"learning_rate": 0.00023999999999999998,
"loss": 1.7903,
"step": 80
},
{
"epoch": 0.05787781350482315,
"grad_norm": 0.7155820727348328,
"learning_rate": 0.00027,
"loss": 1.7298,
"step": 90
},
{
"epoch": 0.06430868167202572,
"grad_norm": 0.6406599283218384,
"learning_rate": 0.0003,
"loss": 1.6995,
"step": 100
},
{
"epoch": 0.0707395498392283,
"grad_norm": 0.5722670555114746,
"learning_rate": 0.00029793814432989686,
"loss": 1.6895,
"step": 110
},
{
"epoch": 0.07717041800643087,
"grad_norm": 0.624355137348175,
"learning_rate": 0.0002958762886597938,
"loss": 1.6073,
"step": 120
},
{
"epoch": 0.08360128617363344,
"grad_norm": 0.5818140506744385,
"learning_rate": 0.0002938144329896907,
"loss": 1.6228,
"step": 130
},
{
"epoch": 0.09003215434083602,
"grad_norm": 0.5287107229232788,
"learning_rate": 0.0002917525773195876,
"loss": 1.6014,
"step": 140
},
{
"epoch": 0.09646302250803858,
"grad_norm": 0.5458894968032837,
"learning_rate": 0.0002896907216494845,
"loss": 1.5688,
"step": 150
},
{
"epoch": 0.10289389067524116,
"grad_norm": 0.5477120876312256,
"learning_rate": 0.0002876288659793814,
"loss": 1.5029,
"step": 160
},
{
"epoch": 0.10932475884244373,
"grad_norm": 0.5350768566131592,
"learning_rate": 0.0002855670103092783,
"loss": 1.559,
"step": 170
},
{
"epoch": 0.1157556270096463,
"grad_norm": 0.5387317538261414,
"learning_rate": 0.00028350515463917525,
"loss": 1.5013,
"step": 180
},
{
"epoch": 0.12218649517684887,
"grad_norm": 0.5648557543754578,
"learning_rate": 0.00028144329896907214,
"loss": 1.5107,
"step": 190
},
{
"epoch": 0.12861736334405144,
"grad_norm": 0.5028502345085144,
"learning_rate": 0.0002793814432989691,
"loss": 1.525,
"step": 200
},
{
"epoch": 0.12861736334405144,
"eval_loss": 1.4951640367507935,
"eval_runtime": 340.2657,
"eval_samples_per_second": 5.878,
"eval_steps_per_second": 0.735,
"step": 200
},
{
"epoch": 0.13504823151125403,
"grad_norm": 0.520252525806427,
"learning_rate": 0.00027731958762886597,
"loss": 1.5075,
"step": 210
},
{
"epoch": 0.1414790996784566,
"grad_norm": 0.5032276511192322,
"learning_rate": 0.00027525773195876286,
"loss": 1.4617,
"step": 220
},
{
"epoch": 0.14790996784565916,
"grad_norm": 0.47061678767204285,
"learning_rate": 0.00027319587628865975,
"loss": 1.4809,
"step": 230
},
{
"epoch": 0.15434083601286175,
"grad_norm": 0.4570547640323639,
"learning_rate": 0.0002711340206185567,
"loss": 1.4195,
"step": 240
},
{
"epoch": 0.1607717041800643,
"grad_norm": 0.49685975909233093,
"learning_rate": 0.0002690721649484536,
"loss": 1.4835,
"step": 250
},
{
"epoch": 0.16720257234726688,
"grad_norm": 0.5451533794403076,
"learning_rate": 0.00026701030927835047,
"loss": 1.4514,
"step": 260
},
{
"epoch": 0.17363344051446947,
"grad_norm": 0.472751647233963,
"learning_rate": 0.0002649484536082474,
"loss": 1.4538,
"step": 270
},
{
"epoch": 0.18006430868167203,
"grad_norm": 0.4936389923095703,
"learning_rate": 0.0002628865979381443,
"loss": 1.4562,
"step": 280
},
{
"epoch": 0.1864951768488746,
"grad_norm": 0.5179173350334167,
"learning_rate": 0.00026082474226804124,
"loss": 1.45,
"step": 290
},
{
"epoch": 0.19292604501607716,
"grad_norm": 0.48392704129219055,
"learning_rate": 0.00025876288659793813,
"loss": 1.4381,
"step": 300
},
{
"epoch": 0.19935691318327975,
"grad_norm": 0.4685576856136322,
"learning_rate": 0.000256701030927835,
"loss": 1.439,
"step": 310
},
{
"epoch": 0.2057877813504823,
"grad_norm": 0.5039055347442627,
"learning_rate": 0.0002546391752577319,
"loss": 1.3921,
"step": 320
},
{
"epoch": 0.21221864951768488,
"grad_norm": 0.49153512716293335,
"learning_rate": 0.00025257731958762885,
"loss": 1.4272,
"step": 330
},
{
"epoch": 0.21864951768488747,
"grad_norm": 0.48751088976860046,
"learning_rate": 0.00025051546391752574,
"loss": 1.3853,
"step": 340
},
{
"epoch": 0.22508038585209003,
"grad_norm": 0.4660916328430176,
"learning_rate": 0.0002484536082474227,
"loss": 1.3963,
"step": 350
},
{
"epoch": 0.2315112540192926,
"grad_norm": 0.44358980655670166,
"learning_rate": 0.0002463917525773196,
"loss": 1.4165,
"step": 360
},
{
"epoch": 0.2379421221864952,
"grad_norm": 0.49413594603538513,
"learning_rate": 0.00024432989690721646,
"loss": 1.3768,
"step": 370
},
{
"epoch": 0.24437299035369775,
"grad_norm": 0.44838714599609375,
"learning_rate": 0.00024226804123711338,
"loss": 1.3877,
"step": 380
},
{
"epoch": 0.2508038585209003,
"grad_norm": 0.46237707138061523,
"learning_rate": 0.0002402061855670103,
"loss": 1.3781,
"step": 390
},
{
"epoch": 0.2572347266881029,
"grad_norm": 0.49087774753570557,
"learning_rate": 0.00023814432989690718,
"loss": 1.4006,
"step": 400
},
{
"epoch": 0.2572347266881029,
"eval_loss": 1.3819265365600586,
"eval_runtime": 340.2482,
"eval_samples_per_second": 5.878,
"eval_steps_per_second": 0.735,
"step": 400
},
{
"epoch": 0.26366559485530544,
"grad_norm": 0.47525086998939514,
"learning_rate": 0.0002360824742268041,
"loss": 1.3835,
"step": 410
},
{
"epoch": 0.27009646302250806,
"grad_norm": 0.4709095358848572,
"learning_rate": 0.00023402061855670102,
"loss": 1.4037,
"step": 420
},
{
"epoch": 0.2765273311897106,
"grad_norm": 0.4556897282600403,
"learning_rate": 0.00023195876288659793,
"loss": 1.4335,
"step": 430
},
{
"epoch": 0.2829581993569132,
"grad_norm": 0.5150389671325684,
"learning_rate": 0.00022989690721649485,
"loss": 1.3855,
"step": 440
},
{
"epoch": 0.28938906752411575,
"grad_norm": 0.4785289764404297,
"learning_rate": 0.0002278350515463917,
"loss": 1.3572,
"step": 450
},
{
"epoch": 0.2958199356913183,
"grad_norm": 0.4818146526813507,
"learning_rate": 0.00022577319587628863,
"loss": 1.4095,
"step": 460
},
{
"epoch": 0.3022508038585209,
"grad_norm": 0.44462355971336365,
"learning_rate": 0.00022371134020618554,
"loss": 1.3607,
"step": 470
},
{
"epoch": 0.3086816720257235,
"grad_norm": 0.4708723723888397,
"learning_rate": 0.00022164948453608246,
"loss": 1.3985,
"step": 480
},
{
"epoch": 0.31511254019292606,
"grad_norm": 0.46771880984306335,
"learning_rate": 0.00021958762886597935,
"loss": 1.364,
"step": 490
},
{
"epoch": 0.3215434083601286,
"grad_norm": 0.4421800971031189,
"learning_rate": 0.00021752577319587626,
"loss": 1.3938,
"step": 500
},
{
"epoch": 0.3279742765273312,
"grad_norm": 0.4229006767272949,
"learning_rate": 0.00021546391752577318,
"loss": 1.3547,
"step": 510
},
{
"epoch": 0.33440514469453375,
"grad_norm": 0.4854479730129242,
"learning_rate": 0.0002134020618556701,
"loss": 1.3448,
"step": 520
},
{
"epoch": 0.3408360128617363,
"grad_norm": 0.4800192713737488,
"learning_rate": 0.000211340206185567,
"loss": 1.3716,
"step": 530
},
{
"epoch": 0.34726688102893893,
"grad_norm": 0.4886428415775299,
"learning_rate": 0.0002092783505154639,
"loss": 1.381,
"step": 540
},
{
"epoch": 0.3536977491961415,
"grad_norm": 0.4030856788158417,
"learning_rate": 0.0002072164948453608,
"loss": 1.3242,
"step": 550
},
{
"epoch": 0.36012861736334406,
"grad_norm": 0.5465673208236694,
"learning_rate": 0.0002051546391752577,
"loss": 1.3309,
"step": 560
},
{
"epoch": 0.3665594855305466,
"grad_norm": 0.47386476397514343,
"learning_rate": 0.00020309278350515462,
"loss": 1.399,
"step": 570
},
{
"epoch": 0.3729903536977492,
"grad_norm": 0.4967169761657715,
"learning_rate": 0.0002010309278350515,
"loss": 1.3441,
"step": 580
},
{
"epoch": 0.37942122186495175,
"grad_norm": 0.44418269395828247,
"learning_rate": 0.00019896907216494843,
"loss": 1.3417,
"step": 590
},
{
"epoch": 0.3858520900321543,
"grad_norm": 0.447294682264328,
"learning_rate": 0.00019690721649484534,
"loss": 1.3011,
"step": 600
},
{
"epoch": 0.3858520900321543,
"eval_loss": 1.331955909729004,
"eval_runtime": 340.7655,
"eval_samples_per_second": 5.869,
"eval_steps_per_second": 0.734,
"step": 600
},
{
"epoch": 0.39228295819935693,
"grad_norm": 0.46866482496261597,
"learning_rate": 0.00019484536082474226,
"loss": 1.3152,
"step": 610
},
{
"epoch": 0.3987138263665595,
"grad_norm": 0.4665449857711792,
"learning_rate": 0.00019278350515463918,
"loss": 1.3253,
"step": 620
},
{
"epoch": 0.40514469453376206,
"grad_norm": 0.4765641391277313,
"learning_rate": 0.00019072164948453606,
"loss": 1.3493,
"step": 630
},
{
"epoch": 0.4115755627009646,
"grad_norm": 0.49187275767326355,
"learning_rate": 0.00018865979381443298,
"loss": 1.3454,
"step": 640
},
{
"epoch": 0.4180064308681672,
"grad_norm": 0.4786391258239746,
"learning_rate": 0.0001865979381443299,
"loss": 1.3464,
"step": 650
},
{
"epoch": 0.42443729903536975,
"grad_norm": 0.5260841846466064,
"learning_rate": 0.00018453608247422679,
"loss": 1.3165,
"step": 660
},
{
"epoch": 0.43086816720257237,
"grad_norm": 0.47295427322387695,
"learning_rate": 0.00018247422680412367,
"loss": 1.3064,
"step": 670
},
{
"epoch": 0.43729903536977494,
"grad_norm": 0.49421828985214233,
"learning_rate": 0.0001804123711340206,
"loss": 1.3315,
"step": 680
},
{
"epoch": 0.4437299035369775,
"grad_norm": 0.4635757505893707,
"learning_rate": 0.0001783505154639175,
"loss": 1.3549,
"step": 690
},
{
"epoch": 0.45016077170418006,
"grad_norm": 0.47637075185775757,
"learning_rate": 0.00017628865979381442,
"loss": 1.3124,
"step": 700
},
{
"epoch": 0.4565916398713826,
"grad_norm": 0.442953884601593,
"learning_rate": 0.00017422680412371134,
"loss": 1.3702,
"step": 710
},
{
"epoch": 0.4630225080385852,
"grad_norm": 0.49394121766090393,
"learning_rate": 0.00017216494845360823,
"loss": 1.3104,
"step": 720
},
{
"epoch": 0.4694533762057878,
"grad_norm": 0.4604041576385498,
"learning_rate": 0.00017010309278350514,
"loss": 1.2666,
"step": 730
},
{
"epoch": 0.4758842443729904,
"grad_norm": 0.45597556233406067,
"learning_rate": 0.00016804123711340206,
"loss": 1.3049,
"step": 740
},
{
"epoch": 0.48231511254019294,
"grad_norm": 0.4338426887989044,
"learning_rate": 0.00016597938144329898,
"loss": 1.3581,
"step": 750
},
{
"epoch": 0.4887459807073955,
"grad_norm": 0.4623814821243286,
"learning_rate": 0.00016391752577319584,
"loss": 1.2777,
"step": 760
},
{
"epoch": 0.49517684887459806,
"grad_norm": 0.4995267689228058,
"learning_rate": 0.00016185567010309275,
"loss": 1.3087,
"step": 770
},
{
"epoch": 0.5016077170418006,
"grad_norm": 0.4469720721244812,
"learning_rate": 0.00015979381443298967,
"loss": 1.2893,
"step": 780
},
{
"epoch": 0.5080385852090032,
"grad_norm": 0.4728144407272339,
"learning_rate": 0.00015773195876288659,
"loss": 1.2496,
"step": 790
},
{
"epoch": 0.5144694533762058,
"grad_norm": 0.4181075096130371,
"learning_rate": 0.0001556701030927835,
"loss": 1.2912,
"step": 800
},
{
"epoch": 0.5144694533762058,
"eval_loss": 1.2991915941238403,
"eval_runtime": 340.7607,
"eval_samples_per_second": 5.869,
"eval_steps_per_second": 0.734,
"step": 800
},
{
"epoch": 0.5209003215434084,
"grad_norm": 0.46053722500801086,
"learning_rate": 0.0001536082474226804,
"loss": 1.2695,
"step": 810
},
{
"epoch": 0.5273311897106109,
"grad_norm": 0.5585050582885742,
"learning_rate": 0.0001515463917525773,
"loss": 1.2968,
"step": 820
},
{
"epoch": 0.5337620578778135,
"grad_norm": 0.4734201431274414,
"learning_rate": 0.00014948453608247422,
"loss": 1.2478,
"step": 830
},
{
"epoch": 0.5401929260450161,
"grad_norm": 0.45543360710144043,
"learning_rate": 0.0001474226804123711,
"loss": 1.3217,
"step": 840
},
{
"epoch": 0.5466237942122186,
"grad_norm": 0.5416027903556824,
"learning_rate": 0.00014536082474226803,
"loss": 1.2522,
"step": 850
},
{
"epoch": 0.5530546623794212,
"grad_norm": 0.42681366205215454,
"learning_rate": 0.00014329896907216494,
"loss": 1.3112,
"step": 860
},
{
"epoch": 0.5594855305466238,
"grad_norm": 0.45384252071380615,
"learning_rate": 0.00014123711340206183,
"loss": 1.2793,
"step": 870
},
{
"epoch": 0.5659163987138264,
"grad_norm": 0.450888454914093,
"learning_rate": 0.00013917525773195875,
"loss": 1.3204,
"step": 880
},
{
"epoch": 0.572347266881029,
"grad_norm": 0.4524657428264618,
"learning_rate": 0.00013711340206185566,
"loss": 1.272,
"step": 890
},
{
"epoch": 0.5787781350482315,
"grad_norm": 0.46384352445602417,
"learning_rate": 0.00013505154639175258,
"loss": 1.2948,
"step": 900
},
{
"epoch": 0.5852090032154341,
"grad_norm": 0.4582608938217163,
"learning_rate": 0.00013298969072164947,
"loss": 1.3027,
"step": 910
},
{
"epoch": 0.5916398713826366,
"grad_norm": 0.4848160147666931,
"learning_rate": 0.00013092783505154639,
"loss": 1.3172,
"step": 920
},
{
"epoch": 0.5980707395498392,
"grad_norm": 0.484418660402298,
"learning_rate": 0.00012886597938144327,
"loss": 1.3117,
"step": 930
},
{
"epoch": 0.6045016077170418,
"grad_norm": 0.39582470059394836,
"learning_rate": 0.0001268041237113402,
"loss": 1.2813,
"step": 940
},
{
"epoch": 0.6109324758842444,
"grad_norm": 0.4482058882713318,
"learning_rate": 0.0001247422680412371,
"loss": 1.2639,
"step": 950
},
{
"epoch": 0.617363344051447,
"grad_norm": 0.49503323435783386,
"learning_rate": 0.000122680412371134,
"loss": 1.302,
"step": 960
},
{
"epoch": 0.6237942122186495,
"grad_norm": 0.42037469148635864,
"learning_rate": 0.00012061855670103093,
"loss": 1.2943,
"step": 970
},
{
"epoch": 0.6302250803858521,
"grad_norm": 0.449434757232666,
"learning_rate": 0.00011855670103092781,
"loss": 1.2941,
"step": 980
},
{
"epoch": 0.6366559485530546,
"grad_norm": 0.44711926579475403,
"learning_rate": 0.00011649484536082473,
"loss": 1.2906,
"step": 990
},
{
"epoch": 0.6430868167202572,
"grad_norm": 0.43769001960754395,
"learning_rate": 0.00011443298969072163,
"loss": 1.3039,
"step": 1000
},
{
"epoch": 0.6430868167202572,
"eval_loss": 1.2755881547927856,
"eval_runtime": 341.9539,
"eval_samples_per_second": 5.849,
"eval_steps_per_second": 0.731,
"step": 1000
},
{
"epoch": 0.6495176848874598,
"grad_norm": 0.4710148870944977,
"learning_rate": 0.00011237113402061855,
"loss": 1.2645,
"step": 1010
},
{
"epoch": 0.6559485530546624,
"grad_norm": 0.44989457726478577,
"learning_rate": 0.00011030927835051547,
"loss": 1.282,
"step": 1020
},
{
"epoch": 0.662379421221865,
"grad_norm": 0.4453730285167694,
"learning_rate": 0.00010824742268041235,
"loss": 1.256,
"step": 1030
},
{
"epoch": 0.6688102893890675,
"grad_norm": 0.5148506164550781,
"learning_rate": 0.00010618556701030927,
"loss": 1.2541,
"step": 1040
},
{
"epoch": 0.6752411575562701,
"grad_norm": 0.43415772914886475,
"learning_rate": 0.00010412371134020617,
"loss": 1.2902,
"step": 1050
},
{
"epoch": 0.6816720257234726,
"grad_norm": 0.4594573974609375,
"learning_rate": 0.00010206185567010309,
"loss": 1.2425,
"step": 1060
},
{
"epoch": 0.6881028938906752,
"grad_norm": 0.4503769278526306,
"learning_rate": 9.999999999999999e-05,
"loss": 1.2896,
"step": 1070
},
{
"epoch": 0.6945337620578779,
"grad_norm": 0.4645858407020569,
"learning_rate": 9.79381443298969e-05,
"loss": 1.2735,
"step": 1080
},
{
"epoch": 0.7009646302250804,
"grad_norm": 0.48965829610824585,
"learning_rate": 9.58762886597938e-05,
"loss": 1.3047,
"step": 1090
},
{
"epoch": 0.707395498392283,
"grad_norm": 0.46829336881637573,
"learning_rate": 9.381443298969071e-05,
"loss": 1.2751,
"step": 1100
},
{
"epoch": 0.7138263665594855,
"grad_norm": 0.4561706483364105,
"learning_rate": 9.175257731958763e-05,
"loss": 1.2858,
"step": 1110
},
{
"epoch": 0.7202572347266881,
"grad_norm": 0.4418432414531708,
"learning_rate": 8.969072164948453e-05,
"loss": 1.2254,
"step": 1120
},
{
"epoch": 0.7266881028938906,
"grad_norm": 0.46051761507987976,
"learning_rate": 8.762886597938145e-05,
"loss": 1.2198,
"step": 1130
},
{
"epoch": 0.7331189710610932,
"grad_norm": 0.48182615637779236,
"learning_rate": 8.556701030927834e-05,
"loss": 1.2631,
"step": 1140
},
{
"epoch": 0.7395498392282959,
"grad_norm": 0.42658165097236633,
"learning_rate": 8.350515463917525e-05,
"loss": 1.2667,
"step": 1150
},
{
"epoch": 0.7459807073954984,
"grad_norm": 0.48048514127731323,
"learning_rate": 8.144329896907215e-05,
"loss": 1.27,
"step": 1160
},
{
"epoch": 0.752411575562701,
"grad_norm": 0.4658808410167694,
"learning_rate": 7.938144329896907e-05,
"loss": 1.2778,
"step": 1170
},
{
"epoch": 0.7588424437299035,
"grad_norm": 0.47830724716186523,
"learning_rate": 7.731958762886596e-05,
"loss": 1.2327,
"step": 1180
},
{
"epoch": 0.7652733118971061,
"grad_norm": 0.4181123971939087,
"learning_rate": 7.525773195876288e-05,
"loss": 1.2559,
"step": 1190
},
{
"epoch": 0.7717041800643086,
"grad_norm": 0.46117720007896423,
"learning_rate": 7.319587628865979e-05,
"loss": 1.2572,
"step": 1200
},
{
"epoch": 0.7717041800643086,
"eval_loss": 1.2570642232894897,
"eval_runtime": 340.7404,
"eval_samples_per_second": 5.87,
"eval_steps_per_second": 0.734,
"step": 1200
},
{
"epoch": 0.7781350482315113,
"grad_norm": 0.4613102078437805,
"learning_rate": 7.11340206185567e-05,
"loss": 1.2659,
"step": 1210
},
{
"epoch": 0.7845659163987139,
"grad_norm": 0.4278354346752167,
"learning_rate": 6.90721649484536e-05,
"loss": 1.2858,
"step": 1220
},
{
"epoch": 0.7909967845659164,
"grad_norm": 0.4664517343044281,
"learning_rate": 6.701030927835051e-05,
"loss": 1.293,
"step": 1230
},
{
"epoch": 0.797427652733119,
"grad_norm": 0.44390469789505005,
"learning_rate": 6.494845360824742e-05,
"loss": 1.2234,
"step": 1240
},
{
"epoch": 0.8038585209003215,
"grad_norm": 0.4815686345100403,
"learning_rate": 6.288659793814433e-05,
"loss": 1.2891,
"step": 1250
},
{
"epoch": 0.8102893890675241,
"grad_norm": 0.43138524889945984,
"learning_rate": 6.0824742268041234e-05,
"loss": 1.2344,
"step": 1260
},
{
"epoch": 0.8167202572347267,
"grad_norm": 0.49563485383987427,
"learning_rate": 5.8762886597938136e-05,
"loss": 1.2214,
"step": 1270
},
{
"epoch": 0.8231511254019293,
"grad_norm": 0.4346306324005127,
"learning_rate": 5.6701030927835046e-05,
"loss": 1.2804,
"step": 1280
},
{
"epoch": 0.8295819935691319,
"grad_norm": 0.4574083685874939,
"learning_rate": 5.463917525773195e-05,
"loss": 1.237,
"step": 1290
},
{
"epoch": 0.8360128617363344,
"grad_norm": 0.43636971712112427,
"learning_rate": 5.257731958762886e-05,
"loss": 1.218,
"step": 1300
},
{
"epoch": 0.842443729903537,
"grad_norm": 0.411658376455307,
"learning_rate": 5.051546391752577e-05,
"loss": 1.2383,
"step": 1310
},
{
"epoch": 0.8488745980707395,
"grad_norm": 0.4198724627494812,
"learning_rate": 4.8453608247422676e-05,
"loss": 1.2599,
"step": 1320
},
{
"epoch": 0.8553054662379421,
"grad_norm": 0.4522201418876648,
"learning_rate": 4.6391752577319585e-05,
"loss": 1.2277,
"step": 1330
},
{
"epoch": 0.8617363344051447,
"grad_norm": 0.4966380298137665,
"learning_rate": 4.4329896907216494e-05,
"loss": 1.2268,
"step": 1340
},
{
"epoch": 0.8681672025723473,
"grad_norm": 0.4492814838886261,
"learning_rate": 4.22680412371134e-05,
"loss": 1.2589,
"step": 1350
},
{
"epoch": 0.8745980707395499,
"grad_norm": 0.4740350544452667,
"learning_rate": 4.0206185567010306e-05,
"loss": 1.2196,
"step": 1360
},
{
"epoch": 0.8810289389067524,
"grad_norm": 0.46403658390045166,
"learning_rate": 3.814432989690721e-05,
"loss": 1.2811,
"step": 1370
},
{
"epoch": 0.887459807073955,
"grad_norm": 0.46236127614974976,
"learning_rate": 3.608247422680412e-05,
"loss": 1.2403,
"step": 1380
},
{
"epoch": 0.8938906752411575,
"grad_norm": 0.44528549909591675,
"learning_rate": 3.402061855670103e-05,
"loss": 1.2374,
"step": 1390
},
{
"epoch": 0.9003215434083601,
"grad_norm": 0.4524483382701874,
"learning_rate": 3.1958762886597937e-05,
"loss": 1.2833,
"step": 1400
},
{
"epoch": 0.9003215434083601,
"eval_loss": 1.2448393106460571,
"eval_runtime": 340.581,
"eval_samples_per_second": 5.872,
"eval_steps_per_second": 0.734,
"step": 1400
}
],
"logging_steps": 10,
"max_steps": 1555,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1406337433445335e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
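
Below is a minimal sketch, not part of the original checkpoint output, showing how the `log_history` entries above can be read back with plain Python to list the recorded evaluation losses. It assumes the file has been saved locally as `trainer_state.json`; the path and the printed summary format are illustrative choices, not anything produced by the Trainer itself.

```python
import json

# Assumed local filename; adjust to wherever the checkpoint's
# trainer_state.json was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("best_metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
for e in eval_logs:
    print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}")
```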