{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997382884061764,
  "eval_steps": 100,
  "global_step": 955,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 5.208333333333333e-09,
      "logits/chosen": -2.676934003829956,
      "logits/rejected": -2.509021043777466,
      "logps/chosen": -304.709228515625,
      "logps/rejected": -229.49505615234375,
      "loss": 0.6931,
      "pred_label": 0.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1,
      "use_label": 10.0
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.976717112922002e-07,
      "logits/chosen": -2.4758388996124268,
      "logits/rejected": -2.4836206436157227,
      "logps/chosen": -273.62322998046875,
      "logps/rejected": -258.89813232421875,
      "loss": 0.6823,
      "pred_label": 156.05050659179688,
      "rewards/accuracies": 0.4886363744735718,
      "rewards/chosen": -0.002133187372237444,
      "rewards/margins": -0.0010164172854274511,
      "rewards/rejected": -0.0011167696211487055,
      "step": 100,
      "use_label": 653.9495239257812
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.3946449359720607e-07,
      "logits/chosen": -2.4878945350646973,
      "logits/rejected": -2.4750781059265137,
      "logps/chosen": -267.72540283203125,
      "logps/rejected": -250.30291748046875,
      "loss": 0.6801,
      "pred_label": 512.7750244140625,
      "rewards/accuracies": 0.5575000047683716,
      "rewards/chosen": 0.003957623615860939,
      "rewards/margins": 0.008218127302825451,
      "rewards/rejected": -0.0042605032213032246,
      "step": 200,
      "use_label": 1889.2249755859375
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.812572759022118e-07,
      "logits/chosen": -2.47322940826416,
      "logits/rejected": -2.4768893718719482,
      "logps/chosen": -272.0399169921875,
      "logps/rejected": -244.90228271484375,
      "loss": 0.6733,
      "pred_label": 943.1199951171875,
      "rewards/accuracies": 0.6075000166893005,
      "rewards/chosen": 0.010850328952074051,
      "rewards/margins": 0.01955023780465126,
      "rewards/rejected": -0.008699909783899784,
      "step": 300,
      "use_label": 3058.8798828125
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.230500582072177e-07,
      "logits/chosen": -2.486250400543213,
      "logits/rejected": -2.466102361679077,
      "logps/chosen": -270.7090759277344,
      "logps/rejected": -252.9911346435547,
      "loss": 0.6635,
      "pred_label": 1496.31005859375,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.01578013226389885,
      "rewards/margins": 0.0331539586186409,
      "rewards/rejected": -0.01737382635474205,
      "step": 400,
      "use_label": 4105.68994140625
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.648428405122235e-07,
      "logits/chosen": -2.46098256111145,
      "logits/rejected": -2.446549654006958,
      "logps/chosen": -259.1031799316406,
      "logps/rejected": -241.98345947265625,
      "loss": 0.6543,
      "pred_label": 2180.89990234375,
      "rewards/accuracies": 0.6175000071525574,
      "rewards/chosen": 0.019046209752559662,
      "rewards/margins": 0.03530467674136162,
      "rewards/rejected": -0.016258466988801956,
      "step": 500,
      "use_label": 5021.10009765625
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.0663562281722933e-07,
      "logits/chosen": -2.4896275997161865,
      "logits/rejected": -2.4663710594177246,
      "logps/chosen": -271.8394470214844,
      "logps/rejected": -252.83351135253906,
      "loss": 0.646,
      "pred_label": 2942.87255859375,
      "rewards/accuracies": 0.6600000262260437,
      "rewards/chosen": 0.02863229252398014,
      "rewards/margins": 0.05473264306783676,
      "rewards/rejected": -0.02610035613179207,
      "step": 600,
      "use_label": 5859.12744140625
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.4842840512223514e-07,
      "logits/chosen": -2.485153913497925,
      "logits/rejected": -2.472170352935791,
      "logps/chosen": -269.14117431640625,
      "logps/rejected": -238.4977569580078,
      "loss": 0.6421,
      "pred_label": 3756.925048828125,
      "rewards/accuracies": 0.6418750286102295,
      "rewards/chosen": 0.03320219740271568,
      "rewards/margins": 0.05917687341570854,
      "rewards/rejected": -0.02597467601299286,
      "step": 700,
      "use_label": 6645.0751953125
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.022118742724097e-08,
      "logits/chosen": -2.4880199432373047,
      "logits/rejected": -2.5014524459838867,
      "logps/chosen": -276.52313232421875,
      "logps/rejected": -252.34681701660156,
      "loss": 0.6386,
      "pred_label": 4625.42236328125,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.03754829242825508,
      "rewards/margins": 0.06682833284139633,
      "rewards/rejected": -0.02928004413843155,
      "step": 800,
      "use_label": 7376.57763671875
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.20139697322468e-08,
      "logits/chosen": -2.4699513912200928,
      "logits/rejected": -2.485213279724121,
      "logps/chosen": -266.04229736328125,
      "logps/rejected": -247.93341064453125,
      "loss": 0.6393,
      "pred_label": 5488.7001953125,
      "rewards/accuracies": 0.6587499976158142,
      "rewards/chosen": 0.03272656351327896,
      "rewards/margins": 0.06201673671603203,
      "rewards/rejected": -0.02929016388952732,
      "step": 900,
      "use_label": 8113.2998046875
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -2.5063796043395996,
      "eval_logits/rejected": -2.5199685096740723,
      "eval_logps/chosen": -272.9034729003906,
      "eval_logps/rejected": -250.30796813964844,
      "eval_loss": 0.6366299986839294,
      "eval_pred_label": 6565.14404296875,
      "eval_rewards/accuracies": 0.6320000290870667,
      "eval_rewards/chosen": 0.03307868540287018,
      "eval_rewards/margins": 0.06870328634977341,
      "eval_rewards/rejected": -0.03562460467219353,
      "eval_runtime": 443.034,
      "eval_samples_per_second": 4.514,
      "eval_steps_per_second": 0.282,
      "eval_use_label": 8966.8564453125,
      "step": 955
    },
    {
      "epoch": 1.0,
      "step": 955,
      "total_flos": 0.0,
      "train_loss": 0.6567496789063458,
      "train_runtime": 24439.922,
      "train_samples_per_second": 2.501,
      "train_steps_per_second": 0.039
    }
  ],
  "logging_steps": 100,
  "max_steps": 955,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}