zephyr-7b-gemma-kto / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 100,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"grad_norm": 66.10108494411732,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 117.53560638427734,
"logits/rejected": 126.8960952758789,
"logps/chosen": -335.40118408203125,
"logps/rejected": -439.16552734375,
"loss": 0.5,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.1895734597156398,
"grad_norm": 63.35028062693078,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 135.05430603027344,
"logits/rejected": 138.40155029296875,
"logps/chosen": -395.61114501953125,
"logps/rejected": -438.9917907714844,
"loss": 0.4945,
"rewards/accuracies": 0.4097222089767456,
"rewards/chosen": 0.019270293414592743,
"rewards/margins": 0.0024831907358020544,
"rewards/rejected": 0.016787106171250343,
"step": 10
},
{
"epoch": 0.3791469194312796,
"grad_norm": 30.009186267506557,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 122.25505065917969,
"logits/rejected": 125.93106842041016,
"logps/chosen": -353.450439453125,
"logps/rejected": -405.93060302734375,
"loss": 0.41,
"rewards/accuracies": 0.5687500238418579,
"rewards/chosen": 0.9480773210525513,
"rewards/margins": 0.24503827095031738,
"rewards/rejected": 0.7030390501022339,
"step": 20
},
{
"epoch": 0.5687203791469194,
"grad_norm": 25.296178390352267,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 143.24757385253906,
"logits/rejected": 136.6645965576172,
"logps/chosen": -379.17156982421875,
"logps/rejected": -435.64471435546875,
"loss": 0.3504,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.6646836996078491,
"rewards/margins": 1.4468095302581787,
"rewards/rejected": -0.7821259498596191,
"step": 30
},
{
"epoch": 0.7582938388625592,
"grad_norm": 24.801763821402933,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 134.1096649169922,
"logits/rejected": 123.01851654052734,
"logps/chosen": -322.86090087890625,
"logps/rejected": -364.0258483886719,
"loss": 0.3439,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": 1.5666635036468506,
"rewards/margins": 1.874193549156189,
"rewards/rejected": -0.30753010511398315,
"step": 40
},
{
"epoch": 0.9478672985781991,
"grad_norm": 25.32318087440862,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 153.5646514892578,
"logits/rejected": 156.97068786621094,
"logps/chosen": -381.78765869140625,
"logps/rejected": -481.5089416503906,
"loss": 0.3097,
"rewards/accuracies": 0.71875,
"rewards/chosen": 1.8678098917007446,
"rewards/margins": 2.126939296722412,
"rewards/rejected": -0.2591293752193451,
"step": 50
},
{
"epoch": 1.1374407582938388,
"grad_norm": 20.749611901077518,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 143.95127868652344,
"logits/rejected": 148.13980102539062,
"logps/chosen": -360.61602783203125,
"logps/rejected": -458.1600036621094,
"loss": 0.2569,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.8694829940795898,
"rewards/margins": 2.9592061042785645,
"rewards/rejected": -1.0897233486175537,
"step": 60
},
{
"epoch": 1.3270142180094786,
"grad_norm": 18.9652516165648,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 138.2149658203125,
"logits/rejected": 139.26931762695312,
"logps/chosen": -327.8791198730469,
"logps/rejected": -428.8116760253906,
"loss": 0.2361,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.9632362127304077,
"rewards/margins": 2.72812819480896,
"rewards/rejected": -0.7648921608924866,
"step": 70
},
{
"epoch": 1.5165876777251186,
"grad_norm": 19.639368765017288,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 125.30033111572266,
"logits/rejected": 141.14437866210938,
"logps/chosen": -311.32623291015625,
"logps/rejected": -434.94378662109375,
"loss": 0.2086,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 2.116239547729492,
"rewards/margins": 3.2015445232391357,
"rewards/rejected": -1.0853049755096436,
"step": 80
},
{
"epoch": 1.7061611374407581,
"grad_norm": 21.053763020863467,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 125.8764419555664,
"logits/rejected": 138.4185028076172,
"logps/chosen": -340.38427734375,
"logps/rejected": -458.72796630859375,
"loss": 0.2018,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.3023386001586914,
"rewards/margins": 3.3546459674835205,
"rewards/rejected": -1.052307367324829,
"step": 90
},
{
"epoch": 1.8957345971563981,
"grad_norm": 20.135689425911313,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 132.50669860839844,
"logits/rejected": 132.55801391601562,
"logps/chosen": -332.8918151855469,
"logps/rejected": -427.8233947753906,
"loss": 0.1942,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 2.4541642665863037,
"rewards/margins": 3.0071334838867188,
"rewards/rejected": -0.5529693365097046,
"step": 100
},
{
"epoch": 1.8957345971563981,
"eval_logits/chosen": 111.29891967773438,
"eval_logits/rejected": 106.04138946533203,
"eval_logps/chosen": -332.04876708984375,
"eval_logps/rejected": -363.3305358886719,
"eval_loss": 0.2925301492214203,
"eval_rewards/accuracies": 0.6770833134651184,
"eval_rewards/chosen": 1.5809730291366577,
"eval_rewards/margins": 1.64395010471344,
"eval_rewards/rejected": -0.06297732144594193,
"eval_runtime": 121.1488,
"eval_samples_per_second": 6.191,
"eval_steps_per_second": 0.198,
"step": 100
},
{
"epoch": 1.971563981042654,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.29734468632019484,
"train_runtime": 2303.4906,
"train_samples_per_second": 5.861,
"train_steps_per_second": 0.045
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
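
For reference, the log_history above can be inspected programmatically. Below is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json; the matplotlib dependency and the output filename training_curves.png are illustrative choices, not part of the original training run.

import json
import matplotlib.pyplot as plt

# Load the trainer state file written out during training.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic training logs (entries carrying a "loss" key);
# the eval entry and the final summary entry do not have that key.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_logs]
loss = [entry["loss"] for entry in train_logs]
margins = [entry["rewards/margins"] for entry in train_logs]

# Plot train loss and reward margins against the global step.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set_xlabel("step")
ax1.set_ylabel("train loss")
ax2.plot(steps, margins)
ax2.set_xlabel("step")
ax2.set_ylabel("rewards/margins")
fig.tight_layout()
fig.savefig("training_curves.png")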