{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9842931937172775,
"eval_steps": 100,
"global_step": 47,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 4.449197665192237,
"learning_rate": 1e-07,
"logits/chosen": -0.07421875,
"logits/rejected": 1.125,
"logps/chosen": -600.0,
"logps/rejected": -434.0,
"loss": 0.6941,
"rewards/accuracies": 0.3203125,
"rewards/chosen": -0.013427734375,
"rewards/margins": -0.002349853515625,
"rewards/rejected": -0.0111083984375,
"step": 1
},
{
"epoch": 0.21,
"grad_norm": 4.40471809269956,
"learning_rate": 4.82718437161051e-07,
"logits/chosen": -0.189453125,
"logits/rejected": 1.1953125,
"logps/chosen": -640.0,
"logps/rejected": -422.0,
"loss": 0.6939,
"rewards/accuracies": 0.3168402910232544,
"rewards/chosen": -0.01239013671875,
"rewards/margins": -0.002349853515625,
"rewards/rejected": -0.010009765625,
"step": 10
},
{
"epoch": 0.42,
"grad_norm": 4.52228710884134,
"learning_rate": 3.584709347793895e-07,
"logits/chosen": -0.21875,
"logits/rejected": 1.171875,
"logps/chosen": -652.0,
"logps/rejected": -418.0,
"loss": 0.6858,
"rewards/accuracies": 0.5679687261581421,
"rewards/chosen": -0.007415771484375,
"rewards/margins": 0.01336669921875,
"rewards/rejected": -0.020751953125,
"step": 20
},
{
"epoch": 0.63,
"grad_norm": 4.318161511759293,
"learning_rate": 1.763112063972739e-07,
"logits/chosen": -0.236328125,
"logits/rejected": 1.140625,
"logps/chosen": -628.0,
"logps/rejected": -414.0,
"loss": 0.6696,
"rewards/accuracies": 0.87109375,
"rewards/chosen": -0.002685546875,
"rewards/margins": 0.046630859375,
"rewards/rejected": -0.04931640625,
"step": 30
},
{
"epoch": 0.84,
"grad_norm": 4.441538814008028,
"learning_rate": 3.349364905389032e-08,
"logits/chosen": -0.298828125,
"logits/rejected": 1.09375,
"logps/chosen": -636.0,
"logps/rejected": -426.0,
"loss": 0.6618,
"rewards/accuracies": 0.9359375238418579,
"rewards/chosen": -0.00022125244140625,
"rewards/margins": 0.06298828125,
"rewards/rejected": -0.06298828125,
"step": 40
},
{
"epoch": 0.98,
"step": 47,
"total_flos": 0.0,
"train_loss": 0.6752358294547872,
"train_runtime": 2856.2983,
"train_samples_per_second": 2.139,
"train_steps_per_second": 0.016
}
],
"logging_steps": 10,
"max_steps": 47,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}