{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.027617212588311,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 6.844920635223389,
      "learning_rate": 5e-05,
      "logits/chosen": -3.09375,
      "logits/rejected": -3.0625,
      "logps/chosen": -258.0,
      "logps/rejected": -322.0,
      "loss": 1.0697,
      "max_memory_allocated (GB)": 94.25,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.30078125,
      "rewards/chosen": 0.67578125,
      "rewards/margins": -0.4140625,
      "rewards/rejected": 1.0859375,
      "step": 10,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.300484657287598,
      "learning_rate": 0.0001,
      "logits/chosen": -3.046875,
      "logits/rejected": -3.03125,
      "logps/chosen": -286.0,
      "logps/rejected": -336.0,
      "loss": 0.5736,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.69140625,
      "rewards/chosen": -0.150390625,
      "rewards/margins": 0.8046875,
      "rewards/rejected": -0.95703125,
      "step": 20,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.9487425088882446,
      "learning_rate": 0.00015,
      "logits/chosen": -2.859375,
      "logits/rejected": -2.84375,
      "logps/chosen": -294.0,
      "logps/rejected": -394.0,
      "loss": 0.2394,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -1.984375,
      "rewards/margins": 4.8125,
      "rewards/rejected": -6.8125,
      "step": 30,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.21,
      "grad_norm": 1.0650818347930908,
      "learning_rate": 0.0002,
      "logits/chosen": -2.875,
      "logits/rejected": -2.828125,
      "logps/chosen": -284.0,
      "logps/rejected": -388.0,
      "loss": 0.1181,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.95703125,
      "rewards/chosen": -0.62109375,
      "rewards/margins": 5.0625,
      "rewards/rejected": -5.6875,
      "step": 40,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.5032941699028015,
      "learning_rate": 0.00025,
      "logits/chosen": -2.890625,
      "logits/rejected": -2.875,
      "logps/chosen": -278.0,
      "logps/rejected": -412.0,
      "loss": 0.0583,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -0.8828125,
      "rewards/margins": 6.96875,
      "rewards/rejected": -7.84375,
      "step": 50,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.8771920800209045,
      "learning_rate": 0.0003,
      "logits/chosen": -2.828125,
      "logits/rejected": -2.828125,
      "logps/chosen": -312.0,
      "logps/rejected": -474.0,
      "loss": 0.0469,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -2.609375,
      "rewards/margins": 11.5,
      "rewards/rejected": -14.125,
      "step": 60,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.42379140853881836,
      "learning_rate": 0.00035,
      "logits/chosen": -2.84375,
      "logits/rejected": -2.84375,
      "logps/chosen": -280.0,
      "logps/rejected": -436.0,
      "loss": 0.0285,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.9921875,
      "rewards/chosen": -0.9609375,
      "rewards/margins": 9.9375,
      "rewards/rejected": -10.9375,
      "step": 70,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.41,
      "grad_norm": 1.5195761919021606,
      "learning_rate": 0.0004,
      "logits/chosen": -2.78125,
      "logits/rejected": -2.765625,
      "logps/chosen": -292.0,
      "logps/rejected": -450.0,
      "loss": 0.0266,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.98828125,
      "rewards/chosen": -1.4453125,
      "rewards/margins": 10.75,
      "rewards/rejected": -12.1875,
      "step": 80,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.46,
      "grad_norm": 2.770358085632324,
      "learning_rate": 0.00045000000000000004,
      "logits/chosen": -2.921875,
      "logits/rejected": -2.9375,
      "logps/chosen": -276.0,
      "logps/rejected": -478.0,
      "loss": 0.0486,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.5703125,
      "rewards/margins": 13.375,
      "rewards/rejected": -15.0,
      "step": 90,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.1556715965270996,
      "learning_rate": 0.0005,
      "logits/chosen": -2.75,
      "logits/rejected": -2.71875,
      "logps/chosen": -284.0,
      "logps/rejected": -482.0,
      "loss": 0.053,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": -1.3125,
      "rewards/margins": 13.0,
      "rewards/rejected": -14.375,
      "step": 100,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.40476056933403015,
      "learning_rate": 0.0004998477067547739,
      "logits/chosen": -2.875,
      "logits/rejected": -2.875,
      "logps/chosen": -296.0,
      "logps/rejected": -444.0,
      "loss": 0.0384,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.98828125,
      "rewards/chosen": -1.2109375,
      "rewards/margins": 10.5,
      "rewards/rejected": -11.75,
      "step": 110,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.8060222864151001,
      "learning_rate": 0.0004993910125649561,
      "logits/chosen": -3.234375,
      "logits/rejected": -3.234375,
      "logps/chosen": -282.0,
      "logps/rejected": -456.0,
      "loss": 0.0183,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.9921875,
      "rewards/chosen": -0.76171875,
      "rewards/margins": 12.4375,
      "rewards/rejected": -13.1875,
      "step": 120,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.5584326982498169,
      "learning_rate": 0.0004986304738420684,
      "logits/chosen": -3.140625,
      "logits/rejected": -3.125,
      "logps/chosen": -296.0,
      "logps/rejected": -496.0,
      "loss": 0.0294,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.875,
      "rewards/margins": 14.8125,
      "rewards/rejected": -16.75,
      "step": 130,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.72,
      "grad_norm": 1.2188811302185059,
      "learning_rate": 0.0004975670171853926,
      "logits/chosen": -3.21875,
      "logits/rejected": -3.21875,
      "logps/chosen": -300.0,
      "logps/rejected": -478.0,
      "loss": 0.0312,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.98828125,
      "rewards/chosen": -1.3515625,
      "rewards/margins": 13.0,
      "rewards/rejected": -14.375,
      "step": 140,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.77,
      "grad_norm": 1.9386507272720337,
      "learning_rate": 0.000496201938253052,
      "logits/chosen": -3.1875,
      "logits/rejected": -3.140625,
      "logps/chosen": -288.0,
      "logps/rejected": -488.0,
      "loss": 0.0504,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.6171875,
      "rewards/margins": 13.25,
      "rewards/rejected": -14.8125,
      "step": 150,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.11973602324724197,
      "learning_rate": 0.0004945369001834514,
      "logits/chosen": -2.9375,
      "logits/rejected": -2.84375,
      "logps/chosen": -298.0,
      "logps/rejected": -486.0,
      "loss": 0.0395,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.9296875,
      "rewards/margins": 14.5625,
      "rewards/rejected": -16.5,
      "step": 160,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.0885164737701416,
      "learning_rate": 0.0004925739315689991,
      "logits/chosen": -2.828125,
      "logits/rejected": -2.78125,
      "logps/chosen": -292.0,
      "logps/rejected": -490.0,
      "loss": 0.0511,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.890625,
      "rewards/margins": 14.25,
      "rewards/rejected": -16.125,
      "step": 170,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.92,
      "grad_norm": 5.371945381164551,
      "learning_rate": 0.0004903154239845797,
      "logits/chosen": -2.71875,
      "logits/rejected": -2.671875,
      "logps/chosen": -304.0,
      "logps/rejected": -474.0,
      "loss": 0.028,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.98828125,
      "rewards/chosen": -2.03125,
      "rewards/margins": 12.75,
      "rewards/rejected": -14.75,
      "step": 180,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 0.98,
      "grad_norm": 1.7487589120864868,
      "learning_rate": 0.0004877641290737884,
      "logits/chosen": -2.90625,
      "logits/rejected": -2.875,
      "logps/chosen": -288.0,
      "logps/rejected": -484.0,
      "loss": 0.053,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.98046875,
      "rewards/chosen": -1.71875,
      "rewards/margins": 13.3125,
      "rewards/rejected": -15.0,
      "step": 190,
      "total_memory_available (GB)": 94.62
    },
    {
      "epoch": 1.03,
      "grad_norm": 0.1586950570344925,
      "learning_rate": 0.0004849231551964771,
      "logits/chosen": -3.0,
      "logits/rejected": -2.96875,
      "logps/chosen": -296.0,
      "logps/rejected": -516.0,
      "loss": 0.0173,
      "max_memory_allocated (GB)": 94.39,
      "memory_allocated (GB)": 51.7,
      "rewards/accuracies": 0.9921875,
      "rewards/chosen": -2.375,
      "rewards/margins": 16.75,
      "rewards/rejected": -19.125,
      "step": 200,
      "total_memory_available (GB)": 94.62
    }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 10,
  "total_flos": 1.679841703231488e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}