{
"best_metric": 0.026573657989501953,
"best_model_checkpoint": "runs/deepseek_20240423-162824/checkpoint-1000",
"epoch": 0.31330774653403304,
"eval_steps": 1000,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 11.5,
"learning_rate": 4.0000000000000003e-07,
"loss": 1.5189,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 16.375,
"learning_rate": 8.000000000000001e-07,
"loss": 1.5082,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 21.75,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.4715,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 13.8125,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.4515,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 9.8125,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5133,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 10.25,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.313,
"step": 60
},
{
"epoch": 0.02,
"grad_norm": 18.125,
"learning_rate": 2.8000000000000003e-06,
"loss": 1.4489,
"step": 70
},
{
"epoch": 0.03,
"grad_norm": 28.0,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.2688,
"step": 80
},
{
"epoch": 0.03,
"grad_norm": 9.25,
"learning_rate": 3.6000000000000003e-06,
"loss": 1.2326,
"step": 90
},
{
"epoch": 0.03,
"grad_norm": 25.625,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1791,
"step": 100
},
{
"epoch": 0.03,
"grad_norm": 6.96875,
"learning_rate": 4.4e-06,
"loss": 0.9111,
"step": 110
},
{
"epoch": 0.04,
"grad_norm": 6.84375,
"learning_rate": 4.800000000000001e-06,
"loss": 0.8633,
"step": 120
},
{
"epoch": 0.04,
"grad_norm": 7.9375,
"learning_rate": 5.2e-06,
"loss": 0.5553,
"step": 130
},
{
"epoch": 0.04,
"grad_norm": 14.5625,
"learning_rate": 5.600000000000001e-06,
"loss": 0.5916,
"step": 140
},
{
"epoch": 0.05,
"grad_norm": 8.8125,
"learning_rate": 6e-06,
"loss": 0.4327,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 3.9375,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.2799,
"step": 160
},
{
"epoch": 0.05,
"grad_norm": 2.34375,
"learning_rate": 6.800000000000001e-06,
"loss": 0.2369,
"step": 170
},
{
"epoch": 0.06,
"grad_norm": 0.171875,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.2997,
"step": 180
},
{
"epoch": 0.06,
"grad_norm": 0.287109375,
"learning_rate": 7.600000000000001e-06,
"loss": 0.0946,
"step": 190
},
{
"epoch": 0.06,
"grad_norm": 18.25,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1124,
"step": 200
},
{
"epoch": 0.07,
"grad_norm": 1.546875,
"learning_rate": 8.400000000000001e-06,
"loss": 0.1131,
"step": 210
},
{
"epoch": 0.07,
"grad_norm": 0.71484375,
"learning_rate": 8.8e-06,
"loss": 0.0652,
"step": 220
},
{
"epoch": 0.07,
"grad_norm": 1.859375,
"learning_rate": 9.200000000000002e-06,
"loss": 0.1486,
"step": 230
},
{
"epoch": 0.08,
"grad_norm": 1.65625,
"learning_rate": 9.600000000000001e-06,
"loss": 0.2169,
"step": 240
},
{
"epoch": 0.08,
"grad_norm": 0.1357421875,
"learning_rate": 1e-05,
"loss": 0.2408,
"step": 250
},
{
"epoch": 0.08,
"grad_norm": 0.055908203125,
"learning_rate": 1.04e-05,
"loss": 0.1166,
"step": 260
},
{
"epoch": 0.08,
"grad_norm": 5.375,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.0114,
"step": 270
},
{
"epoch": 0.09,
"grad_norm": 0.0927734375,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.1631,
"step": 280
},
{
"epoch": 0.09,
"grad_norm": 0.07177734375,
"learning_rate": 1.16e-05,
"loss": 0.0603,
"step": 290
},
{
"epoch": 0.09,
"grad_norm": 0.482421875,
"learning_rate": 1.2e-05,
"loss": 0.1077,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 12.5625,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.0832,
"step": 310
},
{
"epoch": 0.1,
"grad_norm": 0.048095703125,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.0496,
"step": 320
},
{
"epoch": 0.1,
"grad_norm": 0.400390625,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.1024,
"step": 330
},
{
"epoch": 0.11,
"grad_norm": 20.625,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.1108,
"step": 340
},
{
"epoch": 0.11,
"grad_norm": 0.01611328125,
"learning_rate": 1.4e-05,
"loss": 0.0336,
"step": 350
},
{
"epoch": 0.11,
"grad_norm": 0.01544189453125,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.0165,
"step": 360
},
{
"epoch": 0.12,
"grad_norm": 28.0,
"learning_rate": 1.48e-05,
"loss": 0.113,
"step": 370
},
{
"epoch": 0.12,
"grad_norm": 0.0155029296875,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.0221,
"step": 380
},
{
"epoch": 0.12,
"grad_norm": 0.0164794921875,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.1383,
"step": 390
},
{
"epoch": 0.13,
"grad_norm": 1.3203125,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.0589,
"step": 400
},
{
"epoch": 0.13,
"grad_norm": 0.00909423828125,
"learning_rate": 1.64e-05,
"loss": 0.055,
"step": 410
},
{
"epoch": 0.13,
"grad_norm": 0.236328125,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.0195,
"step": 420
},
{
"epoch": 0.13,
"grad_norm": 0.01025390625,
"learning_rate": 1.72e-05,
"loss": 0.1477,
"step": 430
},
{
"epoch": 0.14,
"grad_norm": 0.007415771484375,
"learning_rate": 1.76e-05,
"loss": 0.0708,
"step": 440
},
{
"epoch": 0.14,
"grad_norm": 0.01214599609375,
"learning_rate": 1.8e-05,
"loss": 0.0808,
"step": 450
},
{
"epoch": 0.14,
"grad_norm": 11.3125,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.0689,
"step": 460
},
{
"epoch": 0.15,
"grad_norm": 0.013427734375,
"learning_rate": 1.88e-05,
"loss": 0.1786,
"step": 470
},
{
"epoch": 0.15,
"grad_norm": 0.0113525390625,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.0897,
"step": 480
},
{
"epoch": 0.15,
"grad_norm": 0.2490234375,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.0934,
"step": 490
},
{
"epoch": 0.16,
"grad_norm": 13.0,
"learning_rate": 2e-05,
"loss": 0.1318,
"step": 500
},
{
"epoch": 0.16,
"grad_norm": 0.0162353515625,
"learning_rate": 1.9955555555555557e-05,
"loss": 0.0156,
"step": 510
},
{
"epoch": 0.16,
"grad_norm": 0.0693359375,
"learning_rate": 1.9911111111111112e-05,
"loss": 0.067,
"step": 520
},
{
"epoch": 0.17,
"grad_norm": 0.007232666015625,
"learning_rate": 1.9866666666666667e-05,
"loss": 0.1199,
"step": 530
},
{
"epoch": 0.17,
"grad_norm": 1.796875,
"learning_rate": 1.9822222222222226e-05,
"loss": 0.0882,
"step": 540
},
{
"epoch": 0.17,
"grad_norm": 10.5,
"learning_rate": 1.977777777777778e-05,
"loss": 0.1482,
"step": 550
},
{
"epoch": 0.18,
"grad_norm": 0.0086669921875,
"learning_rate": 1.9733333333333336e-05,
"loss": 0.0937,
"step": 560
},
{
"epoch": 0.18,
"grad_norm": 0.328125,
"learning_rate": 1.968888888888889e-05,
"loss": 0.0845,
"step": 570
},
{
"epoch": 0.18,
"grad_norm": 17.75,
"learning_rate": 1.9644444444444447e-05,
"loss": 0.1225,
"step": 580
},
{
"epoch": 0.18,
"grad_norm": 0.0068359375,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.1117,
"step": 590
},
{
"epoch": 0.19,
"grad_norm": 0.00830078125,
"learning_rate": 1.9555555555555557e-05,
"loss": 0.1494,
"step": 600
},
{
"epoch": 0.19,
"grad_norm": 0.01019287109375,
"learning_rate": 1.9511111111111113e-05,
"loss": 0.0575,
"step": 610
},
{
"epoch": 0.19,
"grad_norm": 1.5,
"learning_rate": 1.9466666666666668e-05,
"loss": 0.0366,
"step": 620
},
{
"epoch": 0.2,
"grad_norm": 0.035400390625,
"learning_rate": 1.9422222222222223e-05,
"loss": 0.0528,
"step": 630
},
{
"epoch": 0.2,
"grad_norm": 0.00860595703125,
"learning_rate": 1.9377777777777778e-05,
"loss": 0.0518,
"step": 640
},
{
"epoch": 0.2,
"grad_norm": 0.00732421875,
"learning_rate": 1.9333333333333333e-05,
"loss": 0.2077,
"step": 650
},
{
"epoch": 0.21,
"grad_norm": 3.921875,
"learning_rate": 1.928888888888889e-05,
"loss": 0.0559,
"step": 660
},
{
"epoch": 0.21,
"grad_norm": 0.01165771484375,
"learning_rate": 1.9244444444444444e-05,
"loss": 0.139,
"step": 670
},
{
"epoch": 0.21,
"grad_norm": 0.03271484375,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.0641,
"step": 680
},
{
"epoch": 0.22,
"grad_norm": 0.007232666015625,
"learning_rate": 1.9155555555555558e-05,
"loss": 0.0861,
"step": 690
},
{
"epoch": 0.22,
"grad_norm": 0.0341796875,
"learning_rate": 1.9111111111111113e-05,
"loss": 0.0353,
"step": 700
},
{
"epoch": 0.22,
"grad_norm": 6.34375,
"learning_rate": 1.9066666666666668e-05,
"loss": 0.0446,
"step": 710
},
{
"epoch": 0.23,
"grad_norm": 0.01019287109375,
"learning_rate": 1.9022222222222223e-05,
"loss": 0.1009,
"step": 720
},
{
"epoch": 0.23,
"grad_norm": 0.007293701171875,
"learning_rate": 1.897777777777778e-05,
"loss": 0.0141,
"step": 730
},
{
"epoch": 0.23,
"grad_norm": 0.009521484375,
"learning_rate": 1.8933333333333334e-05,
"loss": 0.106,
"step": 740
},
{
"epoch": 0.23,
"grad_norm": 12.3125,
"learning_rate": 1.888888888888889e-05,
"loss": 0.1166,
"step": 750
},
{
"epoch": 0.24,
"grad_norm": 12.875,
"learning_rate": 1.8844444444444444e-05,
"loss": 0.1105,
"step": 760
},
{
"epoch": 0.24,
"grad_norm": 21.25,
"learning_rate": 1.88e-05,
"loss": 0.0407,
"step": 770
},
{
"epoch": 0.24,
"grad_norm": 18.25,
"learning_rate": 1.8755555555555558e-05,
"loss": 0.1127,
"step": 780
},
{
"epoch": 0.25,
"grad_norm": 0.032958984375,
"learning_rate": 1.8711111111111113e-05,
"loss": 0.022,
"step": 790
},
{
"epoch": 0.25,
"grad_norm": 8.6875,
"learning_rate": 1.866666666666667e-05,
"loss": 0.1099,
"step": 800
},
{
"epoch": 0.25,
"grad_norm": 0.005950927734375,
"learning_rate": 1.8622222222222224e-05,
"loss": 0.0456,
"step": 810
},
{
"epoch": 0.26,
"grad_norm": 0.005279541015625,
"learning_rate": 1.857777777777778e-05,
"loss": 0.086,
"step": 820
},
{
"epoch": 0.26,
"grad_norm": 0.03662109375,
"learning_rate": 1.8533333333333334e-05,
"loss": 0.0814,
"step": 830
},
{
"epoch": 0.26,
"grad_norm": 0.0089111328125,
"learning_rate": 1.848888888888889e-05,
"loss": 0.0382,
"step": 840
},
{
"epoch": 0.27,
"grad_norm": 292.0,
"learning_rate": 1.8444444444444448e-05,
"loss": 0.0244,
"step": 850
},
{
"epoch": 0.27,
"grad_norm": 0.01263427734375,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.0213,
"step": 860
},
{
"epoch": 0.27,
"grad_norm": 0.004608154296875,
"learning_rate": 1.835555555555556e-05,
"loss": 0.0438,
"step": 870
},
{
"epoch": 0.28,
"grad_norm": 0.005340576171875,
"learning_rate": 1.8311111111111114e-05,
"loss": 0.2058,
"step": 880
},
{
"epoch": 0.28,
"grad_norm": 0.04931640625,
"learning_rate": 1.826666666666667e-05,
"loss": 0.0169,
"step": 890
},
{
"epoch": 0.28,
"grad_norm": 9.5,
"learning_rate": 1.8222222222222224e-05,
"loss": 0.0693,
"step": 900
},
{
"epoch": 0.29,
"grad_norm": 0.00396728515625,
"learning_rate": 1.817777777777778e-05,
"loss": 0.0502,
"step": 910
},
{
"epoch": 0.29,
"grad_norm": 0.0206298828125,
"learning_rate": 1.8133333333333335e-05,
"loss": 0.0479,
"step": 920
},
{
"epoch": 0.29,
"grad_norm": 0.046875,
"learning_rate": 1.808888888888889e-05,
"loss": 0.0184,
"step": 930
},
{
"epoch": 0.29,
"grad_norm": 13.6875,
"learning_rate": 1.8044444444444445e-05,
"loss": 0.0315,
"step": 940
},
{
"epoch": 0.3,
"grad_norm": 10.0,
"learning_rate": 1.8e-05,
"loss": 0.0702,
"step": 950
},
{
"epoch": 0.3,
"grad_norm": 7.84375,
"learning_rate": 1.7955555555555556e-05,
"loss": 0.162,
"step": 960
},
{
"epoch": 0.3,
"grad_norm": 11.25,
"learning_rate": 1.791111111111111e-05,
"loss": 0.0772,
"step": 970
},
{
"epoch": 0.31,
"grad_norm": 0.0206298828125,
"learning_rate": 1.7866666666666666e-05,
"loss": 0.078,
"step": 980
},
{
"epoch": 0.31,
"grad_norm": 0.0546875,
"learning_rate": 1.782222222222222e-05,
"loss": 0.0804,
"step": 990
},
{
"epoch": 0.31,
"grad_norm": 0.053466796875,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.0512,
"step": 1000
},
{
"epoch": 0.31,
"eval_loss": 0.026573657989501953,
"eval_runtime": 53.6666,
"eval_samples_per_second": 18.634,
"eval_steps_per_second": 18.634,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 1.5733698330624e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}