{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.4925373134328357,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07462686567164178,
"grad_norm": 57.12193824365218,
"learning_rate": 5e-07,
"logits/chosen": -2.716521978378296,
"logits/rejected": -2.7164063453674316,
"logps/chosen": -263.66815185546875,
"logps/rejected": -226.99600219726562,
"loss": 0.6899,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.014930379576981068,
"rewards/margins": 0.005484213586896658,
"rewards/rejected": 0.009446167387068272,
"step": 5
},
{
"epoch": 0.14925373134328357,
"grad_norm": 50.98436154009925,
"learning_rate": 1e-06,
"logits/chosen": -2.659832239151001,
"logits/rejected": -2.6706981658935547,
"logps/chosen": -240.44741821289062,
"logps/rejected": -202.6971893310547,
"loss": 0.6353,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.4928322732448578,
"rewards/margins": 0.11446012556552887,
"rewards/rejected": 0.3783721327781677,
"step": 10
},
{
"epoch": 0.22388059701492538,
"grad_norm": 49.5897720682669,
"learning_rate": 9.983100718730718e-07,
"logits/chosen": -2.469956874847412,
"logits/rejected": -2.457557201385498,
"logps/chosen": -224.4867706298828,
"logps/rejected": -186.89651489257812,
"loss": 0.6504,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": 1.3739125728607178,
"rewards/margins": 0.8372632265090942,
"rewards/rejected": 0.5366495847702026,
"step": 15
},
{
"epoch": 0.29850746268656714,
"grad_norm": 38.57601617352061,
"learning_rate": 9.932517109205849e-07,
"logits/chosen": -2.329310894012451,
"logits/rejected": -2.304492235183716,
"logps/chosen": -216.98519897460938,
"logps/rejected": -203.6713104248047,
"loss": 0.6103,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.4170403480529785,
"rewards/margins": 1.1797821521759033,
"rewards/rejected": 0.2372581958770752,
"step": 20
},
{
"epoch": 0.373134328358209,
"grad_norm": 41.87199823484069,
"learning_rate": 9.848591102083375e-07,
"logits/chosen": -2.1940865516662598,
"logits/rejected": -2.1780757904052734,
"logps/chosen": -250.27261352539062,
"logps/rejected": -203.70118713378906,
"loss": 0.5969,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.575352430343628,
"rewards/margins": 1.1331461668014526,
"rewards/rejected": 0.44220608472824097,
"step": 25
},
{
"epoch": 0.44776119402985076,
"grad_norm": 34.36864819717641,
"learning_rate": 9.731890013043367e-07,
"logits/chosen": -2.177009105682373,
"logits/rejected": -2.1458096504211426,
"logps/chosen": -257.85015869140625,
"logps/rejected": -217.74462890625,
"loss": 0.5814,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 1.5730830430984497,
"rewards/margins": 1.5190694332122803,
"rewards/rejected": 0.054013729095458984,
"step": 30
},
{
"epoch": 0.5223880597014925,
"grad_norm": 40.73655696053411,
"learning_rate": 9.583202707897073e-07,
"logits/chosen": -2.2473671436309814,
"logits/rejected": -2.1966567039489746,
"logps/chosen": -247.3220672607422,
"logps/rejected": -202.4593963623047,
"loss": 0.5595,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.881485104560852,
"rewards/margins": 1.353430986404419,
"rewards/rejected": -0.47194600105285645,
"step": 35
},
{
"epoch": 0.5970149253731343,
"grad_norm": 41.80314492549501,
"learning_rate": 9.403534270080829e-07,
"logits/chosen": -2.27215838432312,
"logits/rejected": -2.2764623165130615,
"logps/chosen": -231.4678497314453,
"logps/rejected": -203.9759521484375,
"loss": 0.578,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.9200285077095032,
"rewards/margins": 1.0039114952087402,
"rewards/rejected": -0.0838831439614296,
"step": 40
},
{
"epoch": 0.6716417910447762,
"grad_norm": 38.467845430667744,
"learning_rate": 9.19409920658098e-07,
"logits/chosen": -2.338601589202881,
"logits/rejected": -2.3325324058532715,
"logps/chosen": -247.09738159179688,
"logps/rejected": -206.18026733398438,
"loss": 0.5433,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 1.363398551940918,
"rewards/margins": 1.4603192806243896,
"rewards/rejected": -0.09692087769508362,
"step": 45
},
{
"epoch": 0.746268656716418,
"grad_norm": 37.05742903664642,
"learning_rate": 8.956313238215823e-07,
"logits/chosen": -2.4232733249664307,
"logits/rejected": -2.413396120071411,
"logps/chosen": -252.5549774169922,
"logps/rejected": -190.77883911132812,
"loss": 0.6027,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 1.3205119371414185,
"rewards/margins": 1.3863776922225952,
"rewards/rejected": -0.06586578488349915,
"step": 50
},
{
"epoch": 0.746268656716418,
"eval_logits/chosen": -2.457545757293701,
"eval_logits/rejected": -2.435551404953003,
"eval_logps/chosen": -259.58868408203125,
"eval_logps/rejected": -201.051025390625,
"eval_loss": 0.5490748882293701,
"eval_rewards/accuracies": 0.8208333253860474,
"eval_rewards/chosen": 1.3802965879440308,
"eval_rewards/margins": 1.6785677671432495,
"eval_rewards/rejected": -0.29827114939689636,
"eval_runtime": 125.9419,
"eval_samples_per_second": 15.086,
"eval_steps_per_second": 0.238,
"step": 50
},
{
"epoch": 0.8208955223880597,
"grad_norm": 33.60802656137982,
"learning_rate": 8.691783729769873e-07,
"logits/chosen": -2.4649384021759033,
"logits/rejected": -2.4393675327301025,
"logps/chosen": -247.6337432861328,
"logps/rejected": -225.37429809570312,
"loss": 0.513,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.384879469871521,
"rewards/margins": 1.5727264881134033,
"rewards/rejected": -0.1878468245267868,
"step": 55
},
{
"epoch": 0.8955223880597015,
"grad_norm": 33.47722052756408,
"learning_rate": 8.402298824670029e-07,
"logits/chosen": -2.4343879222869873,
"logits/rejected": -2.429543972015381,
"logps/chosen": -243.7395782470703,
"logps/rejected": -216.7984161376953,
"loss": 0.5325,
"rewards/accuracies": 0.8125,
"rewards/chosen": 1.061714768409729,
"rewards/margins": 1.736096739768982,
"rewards/rejected": -0.6743819713592529,
"step": 60
},
{
"epoch": 0.9701492537313433,
"grad_norm": 30.472130365224405,
"learning_rate": 8.089815357650089e-07,
"logits/chosen": -2.3370931148529053,
"logits/rejected": -2.2993381023406982,
"logps/chosen": -250.01687622070312,
"logps/rejected": -205.3022003173828,
"loss": 0.5415,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 1.2970752716064453,
"rewards/margins": 1.5023232698440552,
"rewards/rejected": -0.2052478790283203,
"step": 65
},
{
"epoch": 1.044776119402985,
"grad_norm": 19.840952878286068,
"learning_rate": 7.756445627110522e-07,
"logits/chosen": -2.2309017181396484,
"logits/rejected": -2.2050414085388184,
"logps/chosen": -237.7880096435547,
"logps/rejected": -231.38265991210938,
"loss": 0.3696,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 1.7277215719223022,
"rewards/margins": 2.358109474182129,
"rewards/rejected": -0.6303879618644714,
"step": 70
},
{
"epoch": 1.1194029850746268,
"grad_norm": 21.240934428524795,
"learning_rate": 7.404443116588547e-07,
"logits/chosen": -2.2093091011047363,
"logits/rejected": -2.1994729042053223,
"logps/chosen": -260.18426513671875,
"logps/rejected": -221.51742553710938,
"loss": 0.2473,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.7431342601776123,
"rewards/margins": 2.91957950592041,
"rewards/rejected": -1.1764450073242188,
"step": 75
},
{
"epoch": 1.1940298507462686,
"grad_norm": 19.68722107584437,
"learning_rate": 7.036187261857288e-07,
"logits/chosen": -2.1707935333251953,
"logits/rejected": -2.151038408279419,
"logps/chosen": -219.67929077148438,
"logps/rejected": -219.01358032226562,
"loss": 0.2201,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.7701470851898193,
"rewards/margins": 3.296602964401245,
"rewards/rejected": -1.5264555215835571,
"step": 80
},
{
"epoch": 1.2686567164179103,
"grad_norm": 19.753765139786655,
"learning_rate": 6.654167366624008e-07,
"logits/chosen": -2.137498140335083,
"logits/rejected": -2.1242105960845947,
"logps/chosen": -235.6524658203125,
"logps/rejected": -222.9441680908203,
"loss": 0.2428,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.8704173564910889,
"rewards/margins": 3.3603546619415283,
"rewards/rejected": -1.4899370670318604,
"step": 85
},
{
"epoch": 1.3432835820895521,
"grad_norm": 18.35032830894322,
"learning_rate": 6.260965775552713e-07,
"logits/chosen": -2.0957112312316895,
"logits/rejected": -2.025146961212158,
"logps/chosen": -224.27377319335938,
"logps/rejected": -204.6040496826172,
"loss": 0.2536,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.086984395980835,
"rewards/margins": 3.093162775039673,
"rewards/rejected": -1.0061782598495483,
"step": 90
},
{
"epoch": 1.417910447761194,
"grad_norm": 22.277576113780217,
"learning_rate": 5.859240418356614e-07,
"logits/chosen": -2.0116684436798096,
"logits/rejected": -1.9373754262924194,
"logps/chosen": -225.5041046142578,
"logps/rejected": -224.0210723876953,
"loss": 0.2394,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.8472282886505127,
"rewards/margins": 3.6459250450134277,
"rewards/rejected": -1.7986968755722046,
"step": 95
},
{
"epoch": 1.4925373134328357,
"grad_norm": 21.864051938399246,
"learning_rate": 5.451706842957421e-07,
"logits/chosen": -1.985772728919983,
"logits/rejected": -1.9631462097167969,
"logps/chosen": -228.11532592773438,
"logps/rejected": -222.6101837158203,
"loss": 0.2795,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 1.6982160806655884,
"rewards/margins": 3.457064151763916,
"rewards/rejected": -1.758847951889038,
"step": 100
},
{
"epoch": 1.4925373134328357,
"eval_logits/chosen": -1.975551724433899,
"eval_logits/rejected": -1.910164713859558,
"eval_logps/chosen": -261.8016357421875,
"eval_logps/rejected": -213.1614227294922,
"eval_loss": 0.5111880302429199,
"eval_rewards/accuracies": 0.8416666388511658,
"eval_rewards/chosen": 1.159003496170044,
"eval_rewards/margins": 2.6683123111724854,
"eval_rewards/rejected": -1.509308934211731,
"eval_runtime": 126.7081,
"eval_samples_per_second": 14.995,
"eval_steps_per_second": 0.237,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 201,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1178822762299392.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}