OpenELM-450M_lora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9992520568436799,
"eval_steps": 300,
"global_step": 334,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029917726252804786,
"grad_norm": 0.515625,
"learning_rate": 1.4705882352941176e-08,
"logits/chosen": -11.9029541015625,
"logits/rejected": -11.867537498474121,
"logps/chosen": -446.77239990234375,
"logps/rejected": -451.92742919921875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.029917726252804786,
"grad_norm": 2.84375,
"learning_rate": 1.4705882352941175e-07,
"logits/chosen": -11.970257759094238,
"logits/rejected": -11.80525016784668,
"logps/chosen": -548.0371704101562,
"logps/rejected": -528.268798828125,
"loss": 0.693,
"rewards/accuracies": 0.4184027910232544,
"rewards/chosen": 0.0011305785737931728,
"rewards/margins": 0.0008499751565977931,
"rewards/rejected": 0.0002806035045068711,
"step": 10
},
{
"epoch": 0.05983545250560957,
"grad_norm": 1.484375,
"learning_rate": 2.941176470588235e-07,
"logits/chosen": -12.091507911682129,
"logits/rejected": -11.931904792785645,
"logps/chosen": -605.2603149414062,
"logps/rejected": -594.6846923828125,
"loss": 0.6946,
"rewards/accuracies": 0.4781250059604645,
"rewards/chosen": -0.0016426166985183954,
"rewards/margins": -0.001760849030688405,
"rewards/rejected": 0.00011823121167253703,
"step": 20
},
{
"epoch": 0.08975317875841436,
"grad_norm": 0.8125,
"learning_rate": 4.4117647058823526e-07,
"logits/chosen": -12.251832962036133,
"logits/rejected": -11.93907356262207,
"logps/chosen": -627.1741333007812,
"logps/rejected": -592.0576782226562,
"loss": 0.6927,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.013599636033177376,
"rewards/margins": 0.001697429222986102,
"rewards/rejected": -0.015297065488994122,
"step": 30
},
{
"epoch": 0.11967090501121914,
"grad_norm": 0.7734375,
"learning_rate": 4.995066821070679e-07,
"logits/chosen": -12.038887023925781,
"logits/rejected": -11.891900062561035,
"logps/chosen": -573.0090942382812,
"logps/rejected": -555.4964599609375,
"loss": 0.6966,
"rewards/accuracies": 0.4390625059604645,
"rewards/chosen": -0.009484687820076942,
"rewards/margins": -0.005814659409224987,
"rewards/rejected": -0.0036700288765132427,
"step": 40
},
{
"epoch": 0.14958863126402394,
"grad_norm": 1.1796875,
"learning_rate": 4.964990092676262e-07,
"logits/chosen": -12.046560287475586,
"logits/rejected": -11.745036125183105,
"logps/chosen": -487.1591796875,
"logps/rejected": -467.3463439941406,
"loss": 0.6961,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.014206953346729279,
"rewards/margins": -0.0039894962683320045,
"rewards/rejected": -0.010217458009719849,
"step": 50
},
{
"epoch": 0.17950635751682872,
"grad_norm": 0.65234375,
"learning_rate": 4.907906416994145e-07,
"logits/chosen": -11.951323509216309,
"logits/rejected": -11.81787109375,
"logps/chosen": -524.3377685546875,
"logps/rejected": -518.5364990234375,
"loss": 0.6951,
"rewards/accuracies": 0.4906249940395355,
"rewards/chosen": -0.007045179605484009,
"rewards/margins": -0.0024539525620639324,
"rewards/rejected": -0.004591226577758789,
"step": 60
},
{
"epoch": 0.2094240837696335,
"grad_norm": 0.7109375,
"learning_rate": 4.824441214720628e-07,
"logits/chosen": -12.155089378356934,
"logits/rejected": -11.935697555541992,
"logps/chosen": -614.5479736328125,
"logps/rejected": -598.7986450195312,
"loss": 0.694,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.004119172692298889,
"rewards/margins": -0.0007487249677069485,
"rewards/rejected": -0.003370448248460889,
"step": 70
},
{
"epoch": 0.2393418100224383,
"grad_norm": 1.125,
"learning_rate": 4.7155089480780365e-07,
"logits/chosen": -12.310976028442383,
"logits/rejected": -11.926130294799805,
"logps/chosen": -590.0772705078125,
"logps/rejected": -552.1112060546875,
"loss": 0.694,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.014592505991458893,
"rewards/margins": -0.000543795176781714,
"rewards/rejected": -0.014048713259398937,
"step": 80
},
{
"epoch": 0.26925953627524307,
"grad_norm": 1.7890625,
"learning_rate": 4.582303101775248e-07,
"logits/chosen": -12.02568244934082,
"logits/rejected": -11.863547325134277,
"logps/chosen": -596.1324462890625,
"logps/rejected": -583.9989624023438,
"loss": 0.6946,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.00669059157371521,
"rewards/margins": -0.0019042941275984049,
"rewards/rejected": -0.004786298610270023,
"step": 90
},
{
"epoch": 0.2991772625280479,
"grad_norm": 1.1015625,
"learning_rate": 4.426283106939473e-07,
"logits/chosen": -12.22739028930664,
"logits/rejected": -11.815451622009277,
"logps/chosen": -604.3762817382812,
"logps/rejected": -574.3903198242188,
"loss": 0.6949,
"rewards/accuracies": 0.4828124940395355,
"rewards/chosen": 0.0030080166179686785,
"rewards/margins": -0.0021901517175137997,
"rewards/rejected": 0.0051981681026518345,
"step": 100
},
{
"epoch": 0.32909498878085264,
"grad_norm": 1.453125,
"learning_rate": 4.249158351283413e-07,
"logits/chosen": -12.154890060424805,
"logits/rejected": -11.859006881713867,
"logps/chosen": -524.4495849609375,
"logps/rejected": -509.00946044921875,
"loss": 0.6954,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.002577757928520441,
"rewards/margins": -0.002684831153601408,
"rewards/rejected": 0.005262589547783136,
"step": 110
},
{
"epoch": 0.35901271503365745,
"grad_norm": 3.0625,
"learning_rate": 4.0528694506957754e-07,
"logits/chosen": -12.192368507385254,
"logits/rejected": -12.028105735778809,
"logps/chosen": -528.8504028320312,
"logps/rejected": -517.0180053710938,
"loss": 0.693,
"rewards/accuracies": 0.5015624761581421,
"rewards/chosen": 0.007784114684909582,
"rewards/margins": 0.0019344612956047058,
"rewards/rejected": 0.005849653389304876,
"step": 120
},
{
"epoch": 0.3889304412864622,
"grad_norm": 1.0546875,
"learning_rate": 3.839566987447491e-07,
"logits/chosen": -12.14574146270752,
"logits/rejected": -12.06743335723877,
"logps/chosen": -646.7266845703125,
"logps/rejected": -643.1847534179688,
"loss": 0.6923,
"rewards/accuracies": 0.4906249940395355,
"rewards/chosen": -0.00342937046661973,
"rewards/margins": 0.0035477778874337673,
"rewards/rejected": -0.006977148354053497,
"step": 130
},
{
"epoch": 0.418848167539267,
"grad_norm": 0.7421875,
"learning_rate": 3.6115879479623183e-07,
"logits/chosen": -11.896639823913574,
"logits/rejected": -11.759775161743164,
"logps/chosen": -548.5411376953125,
"logps/rejected": -533.6222534179688,
"loss": 0.6943,
"rewards/accuracies": 0.4984374940395355,
"rewards/chosen": -0.006624075584113598,
"rewards/margins": -0.00133815489243716,
"rewards/rejected": -0.005285919643938541,
"step": 140
},
{
"epoch": 0.4487658937920718,
"grad_norm": 0.87109375,
"learning_rate": 3.371430118304538e-07,
"logits/chosen": -11.987521171569824,
"logits/rejected": -11.82154655456543,
"logps/chosen": -599.1915283203125,
"logps/rejected": -586.5440673828125,
"loss": 0.6945,
"rewards/accuracies": 0.4593749940395355,
"rewards/chosen": -0.0005397430504672229,
"rewards/margins": -0.001651047496125102,
"rewards/rejected": 0.0011113042710348964,
"step": 150
},
{
"epoch": 0.4786836200448766,
"grad_norm": 2.078125,
"learning_rate": 3.121724717912138e-07,
"logits/chosen": -12.010518074035645,
"logits/rejected": -11.801678657531738,
"logps/chosen": -477.989013671875,
"logps/rejected": -461.06695556640625,
"loss": 0.6923,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.002107062842696905,
"rewards/margins": 0.002797911176458001,
"rewards/rejected": -0.00490497425198555,
"step": 160
},
{
"epoch": 0.5086013462976814,
"grad_norm": 1.046875,
"learning_rate": 2.865207571406029e-07,
"logits/chosen": -12.087748527526855,
"logits/rejected": -11.823697090148926,
"logps/chosen": -565.2094116210938,
"logps/rejected": -535.0115966796875,
"loss": 0.6945,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": -0.010714416392147541,
"rewards/margins": -0.0014880959643051028,
"rewards/rejected": -0.009226320311427116,
"step": 170
},
{
"epoch": 0.5385190725504861,
"grad_norm": 0.69140625,
"learning_rate": 2.6046891343229986e-07,
"logits/chosen": -12.05897331237793,
"logits/rejected": -11.826770782470703,
"logps/chosen": -639.4141845703125,
"logps/rejected": -625.544189453125,
"loss": 0.694,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.008839543908834457,
"rewards/margins": -0.0007707075565122068,
"rewards/rejected": -0.008068837225437164,
"step": 180
},
{
"epoch": 0.5684367988032909,
"grad_norm": 2.15625,
"learning_rate": 2.3430237011767164e-07,
"logits/chosen": -12.140950202941895,
"logits/rejected": -11.830968856811523,
"logps/chosen": -564.7281494140625,
"logps/rejected": -540.7762451171875,
"loss": 0.6931,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.00785023346543312,
"rewards/margins": 0.0012117780279368162,
"rewards/rejected": -0.00906201172620058,
"step": 190
},
{
"epoch": 0.5983545250560958,
"grad_norm": 0.64453125,
"learning_rate": 2.0830781332097445e-07,
"logits/chosen": -12.064250946044922,
"logits/rejected": -11.847702026367188,
"logps/chosen": -601.1182861328125,
"logps/rejected": -584.1577758789062,
"loss": 0.6932,
"rewards/accuracies": 0.46562498807907104,
"rewards/chosen": -0.0038043882232159376,
"rewards/margins": 0.0006765492144040763,
"rewards/rejected": -0.0044809365645051,
"step": 200
},
{
"epoch": 0.6282722513089005,
"grad_norm": 0.6796875,
"learning_rate": 1.8277004484618357e-07,
"logits/chosen": -12.159658432006836,
"logits/rejected": -11.93867015838623,
"logps/chosen": -570.8473510742188,
"logps/rejected": -558.4066162109375,
"loss": 0.6956,
"rewards/accuracies": 0.46562498807907104,
"rewards/chosen": -0.01011698879301548,
"rewards/margins": -0.003837681608274579,
"rewards/rejected": -0.006279306020587683,
"step": 210
},
{
"epoch": 0.6581899775617053,
"grad_norm": 0.90625,
"learning_rate": 1.579688618288305e-07,
"logits/chosen": -12.136404991149902,
"logits/rejected": -11.933469772338867,
"logps/chosen": -653.62841796875,
"logps/rejected": -629.8015747070312,
"loss": 0.6917,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.0027369544841349125,
"rewards/margins": 0.003812599228695035,
"rewards/rejected": -0.0010756452102214098,
"step": 220
},
{
"epoch": 0.6881077038145101,
"grad_norm": 1.046875,
"learning_rate": 1.341759912200346e-07,
"logits/chosen": -12.061933517456055,
"logits/rejected": -11.955540657043457,
"logps/chosen": -541.6209716796875,
"logps/rejected": -530.1668701171875,
"loss": 0.69,
"rewards/accuracies": 0.526562511920929,
"rewards/chosen": 0.006943908985704184,
"rewards/margins": 0.007641922682523727,
"rewards/rejected": -0.0006980125908739865,
"step": 230
},
{
"epoch": 0.7180254300673149,
"grad_norm": 2.078125,
"learning_rate": 1.11652112689164e-07,
"logits/chosen": -11.996115684509277,
"logits/rejected": -11.906721115112305,
"logps/chosen": -514.420166015625,
"logps/rejected": -510.46307373046875,
"loss": 0.6932,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.004082207567989826,
"rewards/margins": 0.0010683867149055004,
"rewards/rejected": 0.003013820853084326,
"step": 240
},
{
"epoch": 0.7479431563201197,
"grad_norm": 0.60546875,
"learning_rate": 9.064400256282755e-08,
"logits/chosen": -11.868894577026367,
"logits/rejected": -11.739983558654785,
"logps/chosen": -636.687744140625,
"logps/rejected": -622.6748657226562,
"loss": 0.6937,
"rewards/accuracies": 0.4984374940395355,
"rewards/chosen": 0.007722427137196064,
"rewards/margins": -8.60728334828309e-07,
"rewards/rejected": 0.007723286747932434,
"step": 250
},
{
"epoch": 0.7778608825729244,
"grad_norm": 0.625,
"learning_rate": 7.138183009179921e-08,
"logits/chosen": -11.95314884185791,
"logits/rejected": -11.800286293029785,
"logps/chosen": -516.5999755859375,
"logps/rejected": -502.53424072265625,
"loss": 0.6941,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.003324865596368909,
"rewards/margins": -0.0007609135354869068,
"rewards/rejected": 0.004085779655724764,
"step": 260
},
{
"epoch": 0.8077786088257293,
"grad_norm": 2.03125,
"learning_rate": 5.4076635668540065e-08,
"logits/chosen": -11.870991706848145,
"logits/rejected": -11.772178649902344,
"logps/chosen": -537.8231201171875,
"logps/rejected": -536.79296875,
"loss": 0.6933,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": -0.0003947736695408821,
"rewards/margins": 0.0007606123690493405,
"rewards/rejected": -0.0011553869117051363,
"step": 270
},
{
"epoch": 0.837696335078534,
"grad_norm": 0.59765625,
"learning_rate": 3.8918018624496286e-08,
"logits/chosen": -12.036161422729492,
"logits/rejected": -11.860921859741211,
"logps/chosen": -554.2711791992188,
"logps/rejected": -541.3438720703125,
"loss": 0.6953,
"rewards/accuracies": 0.4781250059604645,
"rewards/chosen": 0.00031470804242417216,
"rewards/margins": -0.0031545311212539673,
"rewards/rejected": 0.0034692403860390186,
"step": 280
},
{
"epoch": 0.8676140613313388,
"grad_norm": 2.578125,
"learning_rate": 2.6072059940146772e-08,
"logits/chosen": -12.018091201782227,
"logits/rejected": -11.894245147705078,
"logps/chosen": -569.8294677734375,
"logps/rejected": -563.2034912109375,
"loss": 0.6936,
"rewards/accuracies": 0.4921875,
"rewards/chosen": 0.004260566085577011,
"rewards/margins": 0.0009015941177494824,
"rewards/rejected": 0.003358972491696477,
"step": 290
},
{
"epoch": 0.8975317875841436,
"grad_norm": 1.015625,
"learning_rate": 1.5679502627027136e-08,
"logits/chosen": -12.114303588867188,
"logits/rejected": -11.98996639251709,
"logps/chosen": -567.474853515625,
"logps/rejected": -552.4931640625,
"loss": 0.6943,
"rewards/accuracies": 0.46406251192092896,
"rewards/chosen": 0.0020927421282976866,
"rewards/margins": -0.0002106330794049427,
"rewards/rejected": 0.0023033744655549526,
"step": 300
},
{
"epoch": 0.8975317875841436,
"eval_logits/chosen": -12.036696434020996,
"eval_logits/rejected": -11.858353614807129,
"eval_logps/chosen": -579.9698486328125,
"eval_logps/rejected": -567.8892822265625,
"eval_loss": 0.6939929127693176,
"eval_rewards/accuracies": 0.4747757911682129,
"eval_rewards/chosen": 0.0061609940603375435,
"eval_rewards/margins": -0.00024100362497847527,
"eval_rewards/rejected": 0.006401997059583664,
"eval_runtime": 2311.0773,
"eval_samples_per_second": 2.314,
"eval_steps_per_second": 0.289,
"step": 300
},
{
"epoch": 0.9274495138369484,
"grad_norm": 0.7890625,
"learning_rate": 7.85420971784223e-09,
"logits/chosen": -12.027294158935547,
"logits/rejected": -11.775075912475586,
"logps/chosen": -637.2244262695312,
"logps/rejected": -614.4982299804688,
"loss": 0.6929,
"rewards/accuracies": 0.49687498807907104,
"rewards/chosen": 0.008133328519761562,
"rewards/margins": 0.0024830640759319067,
"rewards/rejected": 0.005650263279676437,
"step": 310
},
{
"epoch": 0.9573672400897532,
"grad_norm": 0.99609375,
"learning_rate": 2.6819167592529168e-09,
"logits/chosen": -11.920928955078125,
"logits/rejected": -11.692464828491211,
"logps/chosen": -622.5579223632812,
"logps/rejected": -609.9359130859375,
"loss": 0.693,
"rewards/accuracies": 0.48906248807907104,
"rewards/chosen": 0.012452816590666771,
"rewards/margins": 0.0013520055217668414,
"rewards/rejected": 0.011100810021162033,
"step": 320
},
{
"epoch": 0.9872849663425579,
"grad_norm": 1.734375,
"learning_rate": 2.1929247528540418e-10,
"logits/chosen": -12.089780807495117,
"logits/rejected": -11.94688606262207,
"logps/chosen": -568.0908203125,
"logps/rejected": -560.9391479492188,
"loss": 0.695,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.013248354196548462,
"rewards/margins": -0.0018901375588029623,
"rewards/rejected": 0.015138491988182068,
"step": 330
},
{
"epoch": 0.9992520568436799,
"step": 334,
"total_flos": 0.0,
"train_loss": 0.6938572500994106,
"train_runtime": 20825.8975,
"train_samples_per_second": 1.027,
"train_steps_per_second": 0.016
}
],
"logging_steps": 10,
"max_steps": 334,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 300,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}