{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 574,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003484320557491289,
"grad_norm": 306.63621441123377,
"learning_rate": 1.0344827586206896e-08,
"logits/chosen": -2.5345611572265625,
"logits/rejected": -2.581700563430786,
"logps/chosen": -60.002105712890625,
"logps/rejected": -99.98374938964844,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03484320557491289,
"grad_norm": 287.10820765444424,
"learning_rate": 1.0344827586206897e-07,
"logits/chosen": -2.5633163452148438,
"logits/rejected": -2.562026023864746,
"logps/chosen": -59.65489196777344,
"logps/rejected": -73.39691925048828,
"loss": 0.6954,
"rewards/accuracies": 0.2152777761220932,
"rewards/chosen": 0.0025260683614760637,
"rewards/margins": 0.011007179506123066,
"rewards/rejected": -0.008481111377477646,
"step": 10
},
{
"epoch": 0.06968641114982578,
"grad_norm": 361.11744918130063,
"learning_rate": 2.0689655172413793e-07,
"logits/chosen": -2.60577654838562,
"logits/rejected": -2.5645222663879395,
"logps/chosen": -104.05818939208984,
"logps/rejected": -94.88358306884766,
"loss": 0.6868,
"rewards/accuracies": 0.3062500059604645,
"rewards/chosen": 0.0365118607878685,
"rewards/margins": 0.03092603012919426,
"rewards/rejected": 0.005585831124335527,
"step": 20
},
{
"epoch": 0.10452961672473868,
"grad_norm": 362.0188620910404,
"learning_rate": 3.103448275862069e-07,
"logits/chosen": -2.593327045440674,
"logits/rejected": -2.573579788208008,
"logps/chosen": -82.2002944946289,
"logps/rejected": -91.45396423339844,
"loss": 0.6711,
"rewards/accuracies": 0.29374998807907104,
"rewards/chosen": 0.15702371299266815,
"rewards/margins": 0.1113327145576477,
"rewards/rejected": 0.04569100961089134,
"step": 30
},
{
"epoch": 0.13937282229965156,
"grad_norm": 283.91931715411215,
"learning_rate": 4.1379310344827586e-07,
"logits/chosen": -2.4966464042663574,
"logits/rejected": -2.4948105812072754,
"logps/chosen": -77.7404556274414,
"logps/rejected": -73.33540344238281,
"loss": 0.6457,
"rewards/accuracies": 0.3125,
"rewards/chosen": 0.09431798756122589,
"rewards/margins": 0.27138587832450867,
"rewards/rejected": -0.17706790566444397,
"step": 40
},
{
"epoch": 0.17421602787456447,
"grad_norm": 251.26626326645746,
"learning_rate": 5.172413793103448e-07,
"logits/chosen": -2.5199971199035645,
"logits/rejected": -2.5240330696105957,
"logps/chosen": -62.982337951660156,
"logps/rejected": -75.54759216308594,
"loss": 0.6705,
"rewards/accuracies": 0.2750000059604645,
"rewards/chosen": 0.5071097612380981,
"rewards/margins": 0.21250581741333008,
"rewards/rejected": 0.2946038842201233,
"step": 50
},
{
"epoch": 0.20905923344947736,
"grad_norm": 245.4785949332324,
"learning_rate": 5.98062015503876e-07,
"logits/chosen": -2.473501682281494,
"logits/rejected": -2.4677951335906982,
"logps/chosen": -70.63660430908203,
"logps/rejected": -66.41564178466797,
"loss": 0.6582,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 1.3620513677597046,
"rewards/margins": 0.30580419301986694,
"rewards/rejected": 1.0562469959259033,
"step": 60
},
{
"epoch": 0.24390243902439024,
"grad_norm": 268.1673884309266,
"learning_rate": 5.883720930232558e-07,
"logits/chosen": -2.48606538772583,
"logits/rejected": -2.4808874130249023,
"logps/chosen": -60.53791046142578,
"logps/rejected": -65.51335906982422,
"loss": 0.669,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": 1.9600938558578491,
"rewards/margins": 0.41606950759887695,
"rewards/rejected": 1.5440242290496826,
"step": 70
},
{
"epoch": 0.2787456445993031,
"grad_norm": 266.9849201621109,
"learning_rate": 5.786821705426356e-07,
"logits/chosen": -2.44217586517334,
"logits/rejected": -2.432021379470825,
"logps/chosen": -71.77671813964844,
"logps/rejected": -74.41423797607422,
"loss": 0.6931,
"rewards/accuracies": 0.3062500059604645,
"rewards/chosen": 2.0607221126556396,
"rewards/margins": 0.417347252368927,
"rewards/rejected": 1.643375039100647,
"step": 80
},
{
"epoch": 0.313588850174216,
"grad_norm": 299.0911881932943,
"learning_rate": 5.689922480620155e-07,
"logits/chosen": -2.486508846282959,
"logits/rejected": -2.5009188652038574,
"logps/chosen": -62.32392120361328,
"logps/rejected": -67.05072784423828,
"loss": 0.745,
"rewards/accuracies": 0.29374998807907104,
"rewards/chosen": 1.938929557800293,
"rewards/margins": 0.3153690993785858,
"rewards/rejected": 1.6235605478286743,
"step": 90
},
{
"epoch": 0.34843205574912894,
"grad_norm": 316.56572577244293,
"learning_rate": 5.593023255813953e-07,
"logits/chosen": -2.4774889945983887,
"logits/rejected": -2.4782986640930176,
"logps/chosen": -71.84193420410156,
"logps/rejected": -78.91864013671875,
"loss": 0.7136,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 1.8174957036972046,
"rewards/margins": 0.686999499797821,
"rewards/rejected": 1.1304961442947388,
"step": 100
},
{
"epoch": 0.34843205574912894,
"eval_logits/chosen": -2.561777114868164,
"eval_logits/rejected": -2.545793294906616,
"eval_logps/chosen": -71.84760284423828,
"eval_logps/rejected": -79.50099182128906,
"eval_loss": 0.7108728885650635,
"eval_rewards/accuracies": 0.3511904776096344,
"eval_rewards/chosen": 1.4011276960372925,
"eval_rewards/margins": 0.5386245846748352,
"eval_rewards/rejected": 0.8625030517578125,
"eval_runtime": 113.5305,
"eval_samples_per_second": 17.616,
"eval_steps_per_second": 0.555,
"step": 100
},
{
"epoch": 0.3832752613240418,
"grad_norm": 345.3192427581726,
"learning_rate": 5.496124031007752e-07,
"logits/chosen": -2.500793933868408,
"logits/rejected": -2.4660115242004395,
"logps/chosen": -72.03620910644531,
"logps/rejected": -62.499176025390625,
"loss": 0.7594,
"rewards/accuracies": 0.28125,
"rewards/chosen": 0.6009193658828735,
"rewards/margins": 0.3298734426498413,
"rewards/rejected": 0.2710459232330322,
"step": 110
},
{
"epoch": 0.4181184668989547,
"grad_norm": 243.7872401824864,
"learning_rate": 5.399224806201551e-07,
"logits/chosen": -2.5411460399627686,
"logits/rejected": -2.510791778564453,
"logps/chosen": -76.94505310058594,
"logps/rejected": -67.3199462890625,
"loss": 0.704,
"rewards/accuracies": 0.29374998807907104,
"rewards/chosen": 0.27375704050064087,
"rewards/margins": 0.6173890829086304,
"rewards/rejected": -0.3436321020126343,
"step": 120
},
{
"epoch": 0.4529616724738676,
"grad_norm": 432.4880968925023,
"learning_rate": 5.302325581395349e-07,
"logits/chosen": -2.5748209953308105,
"logits/rejected": -2.5566792488098145,
"logps/chosen": -83.82911682128906,
"logps/rejected": -89.06166076660156,
"loss": 0.7703,
"rewards/accuracies": 0.35624998807907104,
"rewards/chosen": 0.027380788698792458,
"rewards/margins": 0.8505465388298035,
"rewards/rejected": -0.823165774345398,
"step": 130
},
{
"epoch": 0.4878048780487805,
"grad_norm": 213.43504772096108,
"learning_rate": 5.205426356589147e-07,
"logits/chosen": -2.4663920402526855,
"logits/rejected": -2.456141233444214,
"logps/chosen": -79.90180969238281,
"logps/rejected": -71.20616149902344,
"loss": 0.7043,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.6591196060180664,
"rewards/margins": 0.893332302570343,
"rewards/rejected": -0.23421280086040497,
"step": 140
},
{
"epoch": 0.5226480836236934,
"grad_norm": 314.59295726808995,
"learning_rate": 5.108527131782946e-07,
"logits/chosen": -2.5461785793304443,
"logits/rejected": -2.5047733783721924,
"logps/chosen": -77.75875091552734,
"logps/rejected": -79.21798706054688,
"loss": 0.7534,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": 0.9145609140396118,
"rewards/margins": 0.7381798624992371,
"rewards/rejected": 0.17638106644153595,
"step": 150
},
{
"epoch": 0.5574912891986062,
"grad_norm": 288.3107962146333,
"learning_rate": 5.011627906976744e-07,
"logits/chosen": -2.5273547172546387,
"logits/rejected": -2.5466020107269287,
"logps/chosen": -62.79814910888672,
"logps/rejected": -71.2327880859375,
"loss": 0.7312,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": 0.9151515960693359,
"rewards/margins": 0.5228849649429321,
"rewards/rejected": 0.3922666311264038,
"step": 160
},
{
"epoch": 0.5923344947735192,
"grad_norm": 324.81976956861786,
"learning_rate": 4.914728682170542e-07,
"logits/chosen": -2.5521976947784424,
"logits/rejected": -2.5384509563446045,
"logps/chosen": -66.97964477539062,
"logps/rejected": -75.58006286621094,
"loss": 0.7191,
"rewards/accuracies": 0.29374998807907104,
"rewards/chosen": 1.0535058975219727,
"rewards/margins": 0.664495587348938,
"rewards/rejected": 0.3890102505683899,
"step": 170
},
{
"epoch": 0.627177700348432,
"grad_norm": 292.82490566453185,
"learning_rate": 4.817829457364341e-07,
"logits/chosen": -2.5870025157928467,
"logits/rejected": -2.5761828422546387,
"logps/chosen": -88.94615936279297,
"logps/rejected": -84.6361083984375,
"loss": 0.7825,
"rewards/accuracies": 0.33125001192092896,
"rewards/chosen": 1.5428403615951538,
"rewards/margins": 0.5494336485862732,
"rewards/rejected": 0.9934068918228149,
"step": 180
},
{
"epoch": 0.662020905923345,
"grad_norm": 234.8773688372816,
"learning_rate": 4.7209302325581395e-07,
"logits/chosen": -2.5796515941619873,
"logits/rejected": -2.57136607170105,
"logps/chosen": -68.72258758544922,
"logps/rejected": -79.8985595703125,
"loss": 0.7347,
"rewards/accuracies": 0.2874999940395355,
"rewards/chosen": 1.2343099117279053,
"rewards/margins": 0.37060627341270447,
"rewards/rejected": 0.8637038469314575,
"step": 190
},
{
"epoch": 0.6968641114982579,
"grad_norm": 439.89262521049136,
"learning_rate": 4.6240310077519373e-07,
"logits/chosen": -2.605844259262085,
"logits/rejected": -2.612717390060425,
"logps/chosen": -87.73692321777344,
"logps/rejected": -90.65494537353516,
"loss": 0.7461,
"rewards/accuracies": 0.36250001192092896,
"rewards/chosen": 1.5472986698150635,
"rewards/margins": 0.8075531125068665,
"rewards/rejected": 0.739745557308197,
"step": 200
},
{
"epoch": 0.6968641114982579,
"eval_logits/chosen": -2.5759267807006836,
"eval_logits/rejected": -2.5600757598876953,
"eval_logps/chosen": -72.48377227783203,
"eval_logps/rejected": -80.43267822265625,
"eval_loss": 0.7642679214477539,
"eval_rewards/accuracies": 0.3273809552192688,
"eval_rewards/chosen": 1.063955545425415,
"eval_rewards/margins": 0.6952447295188904,
"eval_rewards/rejected": 0.36871081590652466,
"eval_runtime": 113.5756,
"eval_samples_per_second": 17.609,
"eval_steps_per_second": 0.555,
"step": 200
},
{
"epoch": 0.7317073170731707,
"grad_norm": 437.59995338780175,
"learning_rate": 4.527131782945735e-07,
"logits/chosen": -2.5814366340637207,
"logits/rejected": -2.556798219680786,
"logps/chosen": -67.51020812988281,
"logps/rejected": -63.342933654785156,
"loss": 0.7354,
"rewards/accuracies": 0.35624998807907104,
"rewards/chosen": 0.9586971402168274,
"rewards/margins": 0.9261114001274109,
"rewards/rejected": 0.03258571773767471,
"step": 210
},
{
"epoch": 0.7665505226480837,
"grad_norm": 270.9450930053244,
"learning_rate": 4.4302325581395346e-07,
"logits/chosen": -2.6260294914245605,
"logits/rejected": -2.6077115535736084,
"logps/chosen": -71.33647155761719,
"logps/rejected": -70.29251861572266,
"loss": 0.7602,
"rewards/accuracies": 0.23125000298023224,
"rewards/chosen": 1.1245003938674927,
"rewards/margins": 0.3980458974838257,
"rewards/rejected": 0.7264544367790222,
"step": 220
},
{
"epoch": 0.8013937282229965,
"grad_norm": 365.5474496656497,
"learning_rate": 4.3333333333333335e-07,
"logits/chosen": -2.6431806087493896,
"logits/rejected": -2.623713493347168,
"logps/chosen": -86.96244812011719,
"logps/rejected": -87.527587890625,
"loss": 0.7878,
"rewards/accuracies": 0.375,
"rewards/chosen": 1.6102378368377686,
"rewards/margins": 1.2809720039367676,
"rewards/rejected": 0.329265832901001,
"step": 230
},
{
"epoch": 0.8362369337979094,
"grad_norm": 311.80252726932434,
"learning_rate": 4.2364341085271313e-07,
"logits/chosen": -2.643277406692505,
"logits/rejected": -2.609691619873047,
"logps/chosen": -83.11528015136719,
"logps/rejected": -77.69151306152344,
"loss": 0.8033,
"rewards/accuracies": 0.36250001192092896,
"rewards/chosen": 1.9219558238983154,
"rewards/margins": 0.6263306736946106,
"rewards/rejected": 1.2956254482269287,
"step": 240
},
{
"epoch": 0.8710801393728222,
"grad_norm": 352.8980579572314,
"learning_rate": 4.13953488372093e-07,
"logits/chosen": -2.65216064453125,
"logits/rejected": -2.617506504058838,
"logps/chosen": -91.78350067138672,
"logps/rejected": -87.70478820800781,
"loss": 0.6512,
"rewards/accuracies": 0.38749998807907104,
"rewards/chosen": 1.942857027053833,
"rewards/margins": 0.7549124956130981,
"rewards/rejected": 1.1879446506500244,
"step": 250
},
{
"epoch": 0.9059233449477352,
"grad_norm": 265.7817814419997,
"learning_rate": 4.0426356589147286e-07,
"logits/chosen": -2.5617775917053223,
"logits/rejected": -2.5759947299957275,
"logps/chosen": -56.67157745361328,
"logps/rejected": -64.38258361816406,
"loss": 0.7944,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": 1.26022469997406,
"rewards/margins": 0.6000041961669922,
"rewards/rejected": 0.6602205038070679,
"step": 260
},
{
"epoch": 0.9407665505226481,
"grad_norm": 513.5414604532187,
"learning_rate": 3.9457364341085264e-07,
"logits/chosen": -2.6499624252319336,
"logits/rejected": -2.650109052658081,
"logps/chosen": -66.84712219238281,
"logps/rejected": -82.05715942382812,
"loss": 0.7273,
"rewards/accuracies": 0.3375000059604645,
"rewards/chosen": 1.0775161981582642,
"rewards/margins": 0.8259714841842651,
"rewards/rejected": 0.2515445053577423,
"step": 270
},
{
"epoch": 0.975609756097561,
"grad_norm": 338.03485816068525,
"learning_rate": 3.848837209302326e-07,
"logits/chosen": -2.550506591796875,
"logits/rejected": -2.529942512512207,
"logps/chosen": -65.82142639160156,
"logps/rejected": -70.44654846191406,
"loss": 0.6677,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": 1.0668643712997437,
"rewards/margins": 0.8612996339797974,
"rewards/rejected": 0.20556476712226868,
"step": 280
},
{
"epoch": 1.0104529616724738,
"grad_norm": 43.71804076421765,
"learning_rate": 3.7519379844961237e-07,
"logits/chosen": -2.5674736499786377,
"logits/rejected": -2.54020357131958,
"logps/chosen": -68.04539489746094,
"logps/rejected": -65.61439514160156,
"loss": 0.5817,
"rewards/accuracies": 0.3812499940395355,
"rewards/chosen": 2.1318130493164062,
"rewards/margins": 2.503080368041992,
"rewards/rejected": -0.37126731872558594,
"step": 290
},
{
"epoch": 1.0452961672473868,
"grad_norm": 14.731335253505609,
"learning_rate": 3.6550387596899226e-07,
"logits/chosen": -2.6065874099731445,
"logits/rejected": -2.5927734375,
"logps/chosen": -59.9798698425293,
"logps/rejected": -77.5359115600586,
"loss": 0.3949,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": 3.481590747833252,
"rewards/margins": 7.915454864501953,
"rewards/rejected": -4.433863639831543,
"step": 300
},
{
"epoch": 1.0452961672473868,
"eval_logits/chosen": -2.6291775703430176,
"eval_logits/rejected": -2.6134917736053467,
"eval_logps/chosen": -74.10063934326172,
"eval_logps/rejected": -82.3265380859375,
"eval_loss": 0.7874619364738464,
"eval_rewards/accuracies": 0.3472222089767456,
"eval_rewards/chosen": 0.20701055228710175,
"eval_rewards/margins": 0.8420494794845581,
"eval_rewards/rejected": -0.6350388526916504,
"eval_runtime": 113.6067,
"eval_samples_per_second": 17.605,
"eval_steps_per_second": 0.555,
"step": 300
},
{
"epoch": 1.0801393728222997,
"grad_norm": 4.834789646446964,
"learning_rate": 3.558139534883721e-07,
"logits/chosen": -2.578672409057617,
"logits/rejected": -2.5800602436065674,
"logps/chosen": -61.31939697265625,
"logps/rejected": -86.94207000732422,
"loss": 0.4017,
"rewards/accuracies": 0.4375,
"rewards/chosen": 3.34814715385437,
"rewards/margins": 9.546571731567383,
"rewards/rejected": -6.198423862457275,
"step": 310
},
{
"epoch": 1.1149825783972125,
"grad_norm": 99.9815974317529,
"learning_rate": 3.46124031007752e-07,
"logits/chosen": -2.6197047233581543,
"logits/rejected": -2.6059112548828125,
"logps/chosen": -68.153076171875,
"logps/rejected": -89.3502426147461,
"loss": 0.3832,
"rewards/accuracies": 0.46875,
"rewards/chosen": 3.07761812210083,
"rewards/margins": 8.979107856750488,
"rewards/rejected": -5.9014892578125,
"step": 320
},
{
"epoch": 1.1498257839721253,
"grad_norm": 65.16657207151003,
"learning_rate": 3.3643410852713177e-07,
"logits/chosen": -2.6080145835876465,
"logits/rejected": -2.584524631500244,
"logps/chosen": -77.22286224365234,
"logps/rejected": -88.59306335449219,
"loss": 0.3571,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 3.6562671661376953,
"rewards/margins": 8.938417434692383,
"rewards/rejected": -5.282149791717529,
"step": 330
},
{
"epoch": 1.1846689895470384,
"grad_norm": 20.064441334453125,
"learning_rate": 3.267441860465116e-07,
"logits/chosen": -2.58263897895813,
"logits/rejected": -2.5866923332214355,
"logps/chosen": -76.30878448486328,
"logps/rejected": -106.47459411621094,
"loss": 0.3758,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 3.9426658153533936,
"rewards/margins": 9.874300003051758,
"rewards/rejected": -5.931633949279785,
"step": 340
},
{
"epoch": 1.2195121951219512,
"grad_norm": 71.91053721699969,
"learning_rate": 3.170542635658915e-07,
"logits/chosen": -2.5975940227508545,
"logits/rejected": -2.565171003341675,
"logps/chosen": -61.74261474609375,
"logps/rejected": -74.67215728759766,
"loss": 0.3855,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 3.632841110229492,
"rewards/margins": 8.399141311645508,
"rewards/rejected": -4.766300201416016,
"step": 350
},
{
"epoch": 1.254355400696864,
"grad_norm": 78.70073770358177,
"learning_rate": 3.073643410852713e-07,
"logits/chosen": -2.59340238571167,
"logits/rejected": -2.566377639770508,
"logps/chosen": -65.64441680908203,
"logps/rejected": -72.45703125,
"loss": 0.3676,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 3.2890095710754395,
"rewards/margins": 6.889220237731934,
"rewards/rejected": -3.6002116203308105,
"step": 360
},
{
"epoch": 1.289198606271777,
"grad_norm": 42.412792949129724,
"learning_rate": 2.9767441860465116e-07,
"logits/chosen": -2.5649404525756836,
"logits/rejected": -2.5833497047424316,
"logps/chosen": -64.13664245605469,
"logps/rejected": -83.73193359375,
"loss": 0.399,
"rewards/accuracies": 0.46875,
"rewards/chosen": 3.6725857257843018,
"rewards/margins": 8.653525352478027,
"rewards/rejected": -4.980940818786621,
"step": 370
},
{
"epoch": 1.32404181184669,
"grad_norm": 89.8718675271705,
"learning_rate": 2.87984496124031e-07,
"logits/chosen": -2.576333999633789,
"logits/rejected": -2.5752272605895996,
"logps/chosen": -77.921875,
"logps/rejected": -98.92057800292969,
"loss": 0.3699,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 4.744899749755859,
"rewards/margins": 11.345683097839355,
"rewards/rejected": -6.600783348083496,
"step": 380
},
{
"epoch": 1.3588850174216027,
"grad_norm": 53.88569450482403,
"learning_rate": 2.7829457364341084e-07,
"logits/chosen": -2.6578681468963623,
"logits/rejected": -2.6431689262390137,
"logps/chosen": -59.8434944152832,
"logps/rejected": -80.90740203857422,
"loss": 0.3745,
"rewards/accuracies": 0.4437499940395355,
"rewards/chosen": 3.790491819381714,
"rewards/margins": 8.903260231018066,
"rewards/rejected": -5.112768650054932,
"step": 390
},
{
"epoch": 1.3937282229965158,
"grad_norm": 103.59808023860636,
"learning_rate": 2.686046511627907e-07,
"logits/chosen": -2.638619899749756,
"logits/rejected": -2.609290361404419,
"logps/chosen": -78.6235122680664,
"logps/rejected": -108.10832214355469,
"loss": 0.3838,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 3.987473249435425,
"rewards/margins": 9.130887985229492,
"rewards/rejected": -5.1434149742126465,
"step": 400
},
{
"epoch": 1.3937282229965158,
"eval_logits/chosen": -2.6421802043914795,
"eval_logits/rejected": -2.626558542251587,
"eval_logps/chosen": -73.66181945800781,
"eval_logps/rejected": -82.45710754394531,
"eval_loss": 0.871368408203125,
"eval_rewards/accuracies": 0.329365074634552,
"eval_rewards/chosen": 0.4395846724510193,
"eval_rewards/margins": 1.1438220739364624,
"eval_rewards/rejected": -0.7042374610900879,
"eval_runtime": 113.5108,
"eval_samples_per_second": 17.619,
"eval_steps_per_second": 0.555,
"step": 400
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.4652582217397593,
"learning_rate": 2.589147286821705e-07,
"logits/chosen": -2.625115156173706,
"logits/rejected": -2.6144914627075195,
"logps/chosen": -74.08203887939453,
"logps/rejected": -89.65892791748047,
"loss": 0.5043,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 4.521576881408691,
"rewards/margins": 9.441361427307129,
"rewards/rejected": -4.919784069061279,
"step": 410
},
{
"epoch": 1.4634146341463414,
"grad_norm": 14.542963331044218,
"learning_rate": 2.492248062015504e-07,
"logits/chosen": -2.6700663566589355,
"logits/rejected": -2.671051502227783,
"logps/chosen": -70.02223205566406,
"logps/rejected": -93.91789245605469,
"loss": 0.3962,
"rewards/accuracies": 0.4437499940395355,
"rewards/chosen": 3.217597484588623,
"rewards/margins": 8.012170791625977,
"rewards/rejected": -4.794573783874512,
"step": 420
},
{
"epoch": 1.4982578397212545,
"grad_norm": 7.616609761831896,
"learning_rate": 2.3953488372093024e-07,
"logits/chosen": -2.6445345878601074,
"logits/rejected": -2.630586624145508,
"logps/chosen": -61.7302360534668,
"logps/rejected": -81.66353607177734,
"loss": 0.3812,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": 3.6740145683288574,
"rewards/margins": 8.53648567199707,
"rewards/rejected": -4.862471580505371,
"step": 430
},
{
"epoch": 1.533101045296167,
"grad_norm": 19.40227524240624,
"learning_rate": 2.2984496124031007e-07,
"logits/chosen": -2.639118194580078,
"logits/rejected": -2.648719310760498,
"logps/chosen": -61.67310333251953,
"logps/rejected": -81.26171875,
"loss": 0.3814,
"rewards/accuracies": 0.4312500059604645,
"rewards/chosen": 4.401946067810059,
"rewards/margins": 8.5157470703125,
"rewards/rejected": -4.113801002502441,
"step": 440
},
{
"epoch": 1.5679442508710801,
"grad_norm": 11.176099808562324,
"learning_rate": 2.201550387596899e-07,
"logits/chosen": -2.7013635635375977,
"logits/rejected": -2.6666698455810547,
"logps/chosen": -81.5969009399414,
"logps/rejected": -96.87474060058594,
"loss": 0.3926,
"rewards/accuracies": 0.518750011920929,
"rewards/chosen": 4.899897575378418,
"rewards/margins": 10.683164596557617,
"rewards/rejected": -5.783267021179199,
"step": 450
},
{
"epoch": 1.6027874564459932,
"grad_norm": 123.87369972777346,
"learning_rate": 2.1046511627906974e-07,
"logits/chosen": -2.6888933181762695,
"logits/rejected": -2.6685612201690674,
"logps/chosen": -66.90978240966797,
"logps/rejected": -88.9155044555664,
"loss": 0.4047,
"rewards/accuracies": 0.5,
"rewards/chosen": 4.9129533767700195,
"rewards/margins": 11.225677490234375,
"rewards/rejected": -6.312723636627197,
"step": 460
},
{
"epoch": 1.6376306620209058,
"grad_norm": 3.43139199879915,
"learning_rate": 2.0077519379844966e-07,
"logits/chosen": -2.6837058067321777,
"logits/rejected": -2.674848794937134,
"logps/chosen": -54.61207962036133,
"logps/rejected": -79.80362701416016,
"loss": 0.3979,
"rewards/accuracies": 0.40625,
"rewards/chosen": 3.5458691120147705,
"rewards/margins": 7.8562331199646,
"rewards/rejected": -4.310364246368408,
"step": 470
},
{
"epoch": 1.6724738675958188,
"grad_norm": 12.439142544759255,
"learning_rate": 1.9108527131782944e-07,
"logits/chosen": -2.695263385772705,
"logits/rejected": -2.6781885623931885,
"logps/chosen": -48.82516860961914,
"logps/rejected": -57.3747444152832,
"loss": 0.4033,
"rewards/accuracies": 0.3687500059604645,
"rewards/chosen": 2.775264024734497,
"rewards/margins": 5.633866786956787,
"rewards/rejected": -2.8586020469665527,
"step": 480
},
{
"epoch": 1.7073170731707317,
"grad_norm": 40.080099001713826,
"learning_rate": 1.8139534883720925e-07,
"logits/chosen": -2.6758790016174316,
"logits/rejected": -2.6650328636169434,
"logps/chosen": -66.64186096191406,
"logps/rejected": -78.38505554199219,
"loss": 0.4696,
"rewards/accuracies": 0.38749998807907104,
"rewards/chosen": 3.4873664379119873,
"rewards/margins": 7.898496150970459,
"rewards/rejected": -4.411130428314209,
"step": 490
},
{
"epoch": 1.7421602787456445,
"grad_norm": 47.95492405495267,
"learning_rate": 1.7170542635658914e-07,
"logits/chosen": -2.593526601791382,
"logits/rejected": -2.5879125595092773,
"logps/chosen": -68.72013854980469,
"logps/rejected": -92.43871307373047,
"loss": 0.371,
"rewards/accuracies": 0.46875,
"rewards/chosen": 4.010916709899902,
"rewards/margins": 8.630084991455078,
"rewards/rejected": -4.619168281555176,
"step": 500
},
{
"epoch": 1.7421602787456445,
"eval_logits/chosen": -2.7067761421203613,
"eval_logits/rejected": -2.691006898880005,
"eval_logps/chosen": -73.18505859375,
"eval_logps/rejected": -82.15363311767578,
"eval_loss": 0.863908588886261,
"eval_rewards/accuracies": 0.3392857015132904,
"eval_rewards/chosen": 0.6922710537910461,
"eval_rewards/margins": 1.2356635332107544,
"eval_rewards/rejected": -0.5433923602104187,
"eval_runtime": 113.7753,
"eval_samples_per_second": 17.579,
"eval_steps_per_second": 0.554,
"step": 500
},
{
"epoch": 1.7770034843205575,
"grad_norm": 43.89133813372592,
"learning_rate": 1.6201550387596898e-07,
"logits/chosen": -2.665417432785034,
"logits/rejected": -2.647148609161377,
"logps/chosen": -63.23058319091797,
"logps/rejected": -77.56340026855469,
"loss": 0.3821,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": 3.9886889457702637,
"rewards/margins": 8.217727661132812,
"rewards/rejected": -4.229039192199707,
"step": 510
},
{
"epoch": 1.8118466898954704,
"grad_norm": 26.582330424497826,
"learning_rate": 1.523255813953488e-07,
"logits/chosen": -2.671867847442627,
"logits/rejected": -2.666949510574341,
"logps/chosen": -66.01771545410156,
"logps/rejected": -86.70332336425781,
"loss": 0.382,
"rewards/accuracies": 0.46875,
"rewards/chosen": 4.374355316162109,
"rewards/margins": 9.404329299926758,
"rewards/rejected": -5.029973983764648,
"step": 520
},
{
"epoch": 1.8466898954703832,
"grad_norm": 16.01608311357976,
"learning_rate": 1.426356589147287e-07,
"logits/chosen": -2.6440272331237793,
"logits/rejected": -2.6383345127105713,
"logps/chosen": -63.14166259765625,
"logps/rejected": -80.24067687988281,
"loss": 0.3916,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": 4.34307336807251,
"rewards/margins": 9.047937393188477,
"rewards/rejected": -4.704863548278809,
"step": 530
},
{
"epoch": 1.8815331010452963,
"grad_norm": 503.79684025145957,
"learning_rate": 1.3294573643410851e-07,
"logits/chosen": -2.6588096618652344,
"logits/rejected": -2.6732683181762695,
"logps/chosen": -53.71875,
"logps/rejected": -84.29718780517578,
"loss": 0.46,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 5.3063740730285645,
"rewards/margins": 11.112980842590332,
"rewards/rejected": -5.806607246398926,
"step": 540
},
{
"epoch": 1.916376306620209,
"grad_norm": 1.5289785290366227,
"learning_rate": 1.2325581395348835e-07,
"logits/chosen": -2.636369228363037,
"logits/rejected": -2.6182668209075928,
"logps/chosen": -79.08727264404297,
"logps/rejected": -97.51543426513672,
"loss": 0.3689,
"rewards/accuracies": 0.5,
"rewards/chosen": 6.226182460784912,
"rewards/margins": 11.859712600708008,
"rewards/rejected": -5.633530139923096,
"step": 550
},
{
"epoch": 1.951219512195122,
"grad_norm": 96.76132190493311,
"learning_rate": 1.1356589147286824e-07,
"logits/chosen": -2.64457368850708,
"logits/rejected": -2.651458740234375,
"logps/chosen": -55.0025520324707,
"logps/rejected": -76.64137268066406,
"loss": 0.3985,
"rewards/accuracies": 0.4437499940395355,
"rewards/chosen": 4.620251655578613,
"rewards/margins": 7.983218193054199,
"rewards/rejected": -3.362966537475586,
"step": 560
},
{
"epoch": 1.986062717770035,
"grad_norm": 13.582049528683124,
"learning_rate": 1.0387596899224806e-07,
"logits/chosen": -2.7313754558563232,
"logits/rejected": -2.702322483062744,
"logps/chosen": -56.71698760986328,
"logps/rejected": -67.86329650878906,
"loss": 0.3789,
"rewards/accuracies": 0.4437499940395355,
"rewards/chosen": 4.503007411956787,
"rewards/margins": 7.31237268447876,
"rewards/rejected": -2.8093647956848145,
"step": 570
},
{
"epoch": 2.0,
"step": 574,
"total_flos": 0.0,
"train_loss": 0.5567006942287139,
"train_runtime": 6469.9642,
"train_samples_per_second": 5.669,
"train_steps_per_second": 0.089
}
],
"logging_steps": 10,
"max_steps": 574,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}