{
"best_metric": 0.8604190945625305,
"best_model_checkpoint": "./checkpoints/dpo-mix-7k/Qwen1.5-0.5B-dpo-mix-7k-lambda1.0-ORPO-3-7-44/checkpoint-1687",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1687,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 800.0,
"learning_rate": 2.5e-06,
"loss": 18.0938,
"step": 50
},
{
"epoch": 0.06,
"grad_norm": 23.5,
"learning_rate": 5e-06,
"loss": 6.3163,
"step": 100
},
{
"epoch": 0.09,
"grad_norm": 50.75,
"learning_rate": 4.987763947682049e-06,
"loss": 0.9927,
"step": 150
},
{
"epoch": 0.12,
"grad_norm": 19.0,
"learning_rate": 4.951175567509258e-06,
"loss": 1.0442,
"step": 200
},
{
"epoch": 0.15,
"grad_norm": 13.625,
"learning_rate": 4.890593017348846e-06,
"loss": 0.9387,
"step": 250
},
{
"epoch": 0.18,
"grad_norm": 66.5,
"learning_rate": 4.806609330203466e-06,
"loss": 0.97,
"step": 300
},
{
"epoch": 0.21,
"grad_norm": 22.5,
"learning_rate": 4.700046609104932e-06,
"loss": 0.9789,
"step": 350
},
{
"epoch": 0.24,
"grad_norm": 11.625,
"learning_rate": 4.5719479796776466e-06,
"loss": 0.8951,
"step": 400
},
{
"epoch": 0.27,
"grad_norm": 51.0,
"learning_rate": 4.423567379146835e-06,
"loss": 1.0329,
"step": 450
},
{
"epoch": 0.3,
"grad_norm": 13.9375,
"learning_rate": 4.256357281745347e-06,
"loss": 1.0507,
"step": 500
},
{
"epoch": 0.33,
"grad_norm": 15.0625,
"learning_rate": 4.071954480673098e-06,
"loss": 0.8861,
"step": 550
},
{
"epoch": 0.36,
"grad_norm": 33.75,
"learning_rate": 3.872164065787287e-06,
"loss": 0.8826,
"step": 600
},
{
"epoch": 0.39,
"grad_norm": 16.875,
"learning_rate": 3.6589417538632477e-06,
"loss": 1.0443,
"step": 650
},
{
"epoch": 0.41,
"grad_norm": 17.25,
"learning_rate": 3.4343747443922253e-06,
"loss": 0.9825,
"step": 700
},
{
"epoch": 0.44,
"grad_norm": 10.1875,
"learning_rate": 3.2006612883156387e-06,
"loss": 0.8588,
"step": 750
},
{
"epoch": 0.47,
"grad_norm": 18.5,
"learning_rate": 2.960089169694257e-06,
"loss": 0.86,
"step": 800
},
{
"epoch": 0.5,
"grad_norm": 34.0,
"learning_rate": 2.7150133109518347e-06,
"loss": 0.8479,
"step": 850
},
{
"epoch": 0.53,
"grad_norm": 17.25,
"learning_rate": 2.4678327209119225e-06,
"loss": 0.8909,
"step": 900
},
{
"epoch": 0.56,
"grad_norm": 19.625,
"learning_rate": 2.220967011279888e-06,
"loss": 0.8326,
"step": 950
},
{
"epoch": 0.59,
"grad_norm": 15.6875,
"learning_rate": 1.976832711446584e-06,
"loss": 0.9614,
"step": 1000
},
{
"epoch": 0.62,
"grad_norm": 28.0,
"learning_rate": 1.737819613464305e-06,
"loss": 0.751,
"step": 1050
},
{
"epoch": 0.65,
"grad_norm": 17.125,
"learning_rate": 1.506267378750319e-06,
"loss": 0.9322,
"step": 1100
},
{
"epoch": 0.68,
"grad_norm": 20.125,
"learning_rate": 1.2844426355112658e-06,
"loss": 0.7542,
"step": 1150
},
{
"epoch": 0.71,
"grad_norm": 21.5,
"learning_rate": 1.0745167910780963e-06,
"loss": 0.8321,
"step": 1200
},
{
"epoch": 0.74,
"grad_norm": 14.25,
"learning_rate": 8.785447763431101e-07,
"loss": 0.87,
"step": 1250
},
{
"epoch": 0.77,
"grad_norm": 15.25,
"learning_rate": 6.984449303664287e-07,
"loss": 0.8061,
"step": 1300
},
{
"epoch": 0.8,
"grad_norm": 20.375,
"learning_rate": 5.359802220583124e-07,
"loss": 0.8294,
"step": 1350
},
{
"epoch": 0.83,
"grad_norm": 12.5625,
"learning_rate": 3.9274099275530384e-07,
"loss": 0.9622,
"step": 1400
},
{
"epoch": 0.86,
"grad_norm": 16.75,
"learning_rate": 2.7012938862039123e-07,
"loss": 0.8932,
"step": 1450
},
{
"epoch": 0.89,
"grad_norm": 13.75,
"learning_rate": 1.6934563525596053e-07,
"loss": 0.9018,
"step": 1500
},
{
"epoch": 0.92,
"grad_norm": 26.75,
"learning_rate": 9.137628888518491e-08,
"loss": 0.9468,
"step": 1550
},
{
"epoch": 0.95,
"grad_norm": 18.75,
"learning_rate": 3.698457910917608e-08,
"loss": 0.8617,
"step": 1600
},
{
"epoch": 0.98,
"grad_norm": 13.125,
"learning_rate": 6.702937773119533e-09,
"loss": 0.8404,
"step": 1650
},
{
"epoch": 1.0,
"eval_loss": 0.8604190945625305,
"eval_runtime": 87.9192,
"eval_samples_per_second": 4.277,
"eval_steps_per_second": 2.138,
"step": 1687
}
],
"logging_steps": 50,
"max_steps": 1687,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 1.2782603635523584e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}