math-deepseek-LORA-ArithHardC11 / trainer_state.json
Uploaded checkpoint-35000 (commit dae1fd8, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.875,
"eval_steps": 2500,
"global_step": 35000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 2.9973244667053223,
"learning_rate": 1.971072463768116e-05,
"loss": 1.6969,
"step": 1000
},
{
"epoch": 0.05,
"grad_norm": 7.261388778686523,
"learning_rate": 1.9131014492753626e-05,
"loss": 1.5602,
"step": 2000
},
{
"epoch": 0.06,
"eval_loss": 1.513683795928955,
"eval_runtime": 202.2713,
"eval_samples_per_second": 4.944,
"eval_steps_per_second": 4.944,
"step": 2500
},
{
"epoch": 0.07,
"grad_norm": 3.314629316329956,
"learning_rate": 1.8551304347826088e-05,
"loss": 1.5222,
"step": 3000
},
{
"epoch": 0.1,
"grad_norm": 3.6707098484039307,
"learning_rate": 1.7971594202898553e-05,
"loss": 1.4933,
"step": 4000
},
{
"epoch": 0.12,
"grad_norm": 1.160583257675171,
"learning_rate": 1.7392463768115943e-05,
"loss": 1.5056,
"step": 5000
},
{
"epoch": 0.12,
"eval_loss": 1.4657692909240723,
"eval_runtime": 201.9515,
"eval_samples_per_second": 4.952,
"eval_steps_per_second": 4.952,
"step": 5000
},
{
"epoch": 0.15,
"grad_norm": 2.666102170944214,
"learning_rate": 1.6812753623188408e-05,
"loss": 1.4754,
"step": 6000
},
{
"epoch": 0.17,
"grad_norm": 2.95894455909729,
"learning_rate": 1.623304347826087e-05,
"loss": 1.4816,
"step": 7000
},
{
"epoch": 0.19,
"eval_loss": 1.4514989852905273,
"eval_runtime": 201.9852,
"eval_samples_per_second": 4.951,
"eval_steps_per_second": 4.951,
"step": 7500
},
{
"epoch": 0.2,
"grad_norm": 6.616883277893066,
"learning_rate": 1.5653333333333335e-05,
"loss": 1.4414,
"step": 8000
},
{
"epoch": 0.23,
"grad_norm": 4.114346027374268,
"learning_rate": 1.5074202898550725e-05,
"loss": 1.4392,
"step": 9000
},
{
"epoch": 0.25,
"grad_norm": 9.38364028930664,
"learning_rate": 1.4495072463768116e-05,
"loss": 1.4307,
"step": 10000
},
{
"epoch": 0.25,
"eval_loss": 1.4246548414230347,
"eval_runtime": 201.9169,
"eval_samples_per_second": 4.953,
"eval_steps_per_second": 4.953,
"step": 10000
},
{
"epoch": 0.28,
"grad_norm": 3.899681806564331,
"learning_rate": 1.3915362318840582e-05,
"loss": 1.4313,
"step": 11000
},
{
"epoch": 0.3,
"grad_norm": 2.839545965194702,
"learning_rate": 1.3336231884057973e-05,
"loss": 1.415,
"step": 12000
},
{
"epoch": 0.31,
"eval_loss": 1.4323452711105347,
"eval_runtime": 202.4132,
"eval_samples_per_second": 4.94,
"eval_steps_per_second": 4.94,
"step": 12500
},
{
"epoch": 0.33,
"grad_norm": 3.805086612701416,
"learning_rate": 1.2756521739130436e-05,
"loss": 1.4003,
"step": 13000
},
{
"epoch": 0.35,
"grad_norm": 6.0652241706848145,
"learning_rate": 1.21768115942029e-05,
"loss": 1.3952,
"step": 14000
},
{
"epoch": 0.38,
"grad_norm": 8.923527717590332,
"learning_rate": 1.1597681159420292e-05,
"loss": 1.3933,
"step": 15000
},
{
"epoch": 0.38,
"eval_loss": 1.3821483850479126,
"eval_runtime": 201.8609,
"eval_samples_per_second": 4.954,
"eval_steps_per_second": 4.954,
"step": 15000
},
{
"epoch": 0.4,
"grad_norm": 5.487946033477783,
"learning_rate": 1.1017971014492755e-05,
"loss": 1.3825,
"step": 16000
},
{
"epoch": 0.42,
"grad_norm": 2.9339723587036133,
"learning_rate": 1.0438260869565218e-05,
"loss": 1.373,
"step": 17000
},
{
"epoch": 0.44,
"eval_loss": 1.371016263961792,
"eval_runtime": 201.6518,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 4.959,
"step": 17500
},
{
"epoch": 0.45,
"grad_norm": 3.8097546100616455,
"learning_rate": 9.85913043478261e-06,
"loss": 1.35,
"step": 18000
},
{
"epoch": 0.47,
"grad_norm": 6.639487266540527,
"learning_rate": 9.279420289855074e-06,
"loss": 1.3498,
"step": 19000
},
{
"epoch": 0.5,
"grad_norm": 4.806727886199951,
"learning_rate": 8.700289855072464e-06,
"loss": 1.3372,
"step": 20000
},
{
"epoch": 0.5,
"eval_loss": 1.3245751857757568,
"eval_runtime": 201.5593,
"eval_samples_per_second": 4.961,
"eval_steps_per_second": 4.961,
"step": 20000
},
{
"epoch": 0.53,
"grad_norm": 6.062647342681885,
"learning_rate": 8.120579710144927e-06,
"loss": 1.3637,
"step": 21000
},
{
"epoch": 0.55,
"grad_norm": 5.603002071380615,
"learning_rate": 7.540869565217392e-06,
"loss": 1.3464,
"step": 22000
},
{
"epoch": 0.56,
"eval_loss": 1.3194873332977295,
"eval_runtime": 201.6454,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 4.959,
"step": 22500
},
{
"epoch": 0.57,
"grad_norm": 6.0290374755859375,
"learning_rate": 6.961739130434784e-06,
"loss": 1.3187,
"step": 23000
},
{
"epoch": 0.6,
"grad_norm": 2.862253189086914,
"learning_rate": 6.382028985507247e-06,
"loss": 1.321,
"step": 24000
},
{
"epoch": 0.62,
"grad_norm": 2.9182982444763184,
"learning_rate": 5.80231884057971e-06,
"loss": 1.3157,
"step": 25000
},
{
"epoch": 0.62,
"eval_loss": 1.2969852685928345,
"eval_runtime": 201.2648,
"eval_samples_per_second": 4.969,
"eval_steps_per_second": 4.969,
"step": 25000
},
{
"epoch": 0.65,
"grad_norm": 2.6946563720703125,
"learning_rate": 5.222608695652175e-06,
"loss": 1.3161,
"step": 26000
},
{
"epoch": 0.68,
"grad_norm": 10.966987609863281,
"learning_rate": 4.643478260869566e-06,
"loss": 1.3253,
"step": 27000
},
{
"epoch": 0.69,
"eval_loss": 1.3072532415390015,
"eval_runtime": 201.6414,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 4.959,
"step": 27500
},
{
"epoch": 0.7,
"grad_norm": 14.545968055725098,
"learning_rate": 4.06376811594203e-06,
"loss": 1.3143,
"step": 28000
},
{
"epoch": 0.72,
"grad_norm": 9.552095413208008,
"learning_rate": 3.4840579710144927e-06,
"loss": 1.3042,
"step": 29000
},
{
"epoch": 0.75,
"grad_norm": 2.7875924110412598,
"learning_rate": 2.905507246376812e-06,
"loss": 1.2942,
"step": 30000
},
{
"epoch": 0.75,
"eval_loss": 1.272377848625183,
"eval_runtime": 201.643,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 4.959,
"step": 30000
},
{
"epoch": 0.78,
"grad_norm": 2.4199697971343994,
"learning_rate": 2.3257971014492754e-06,
"loss": 1.2925,
"step": 31000
},
{
"epoch": 0.8,
"grad_norm": 10.922388076782227,
"learning_rate": 1.7460869565217393e-06,
"loss": 1.2885,
"step": 32000
},
{
"epoch": 0.81,
"eval_loss": 1.3001521825790405,
"eval_runtime": 201.9216,
"eval_samples_per_second": 4.952,
"eval_steps_per_second": 4.952,
"step": 32500
},
{
"epoch": 0.82,
"grad_norm": 10.57639217376709,
"learning_rate": 1.1669565217391306e-06,
"loss": 1.2787,
"step": 33000
},
{
"epoch": 0.85,
"grad_norm": 5.37148380279541,
"learning_rate": 5.872463768115943e-07,
"loss": 1.2788,
"step": 34000
},
{
"epoch": 0.88,
"grad_norm": 12.262892723083496,
"learning_rate": 7.536231884057971e-09,
"loss": 1.2844,
"step": 35000
},
{
"epoch": 0.88,
"eval_loss": 1.2726885080337524,
"eval_runtime": 201.8671,
"eval_samples_per_second": 4.954,
"eval_steps_per_second": 4.954,
"step": 35000
}
],
"logging_steps": 1000,
"max_steps": 35000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2500,
"total_flos": 5.6357440978944e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
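
The file above is the standard Hugging Face Trainer state dump written alongside checkpoint-35000. Below is a minimal inspection sketch, assuming the JSON is saved locally as trainer_state.json; the field names are taken directly from the file, and the snippet is illustrative rather than part of the repository.

import json

# Load the Trainer state and split log_history into training and evaluation entries.
with open("trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]       # logged every 1000 steps
eval_log  = [e for e in state["log_history"] if "eval_loss" in e]  # logged every 2500 steps

for entry in train_log:
    print(f"step {entry['step']:>6}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")

for entry in eval_log:
    print(f"step {entry['step']:>6}  eval_loss {entry['eval_loss']:.4f}")

print(f"epoch {state['epoch']} of {state['num_train_epochs']}, global step {state['global_step']}")

Read this way, the log shows the training loss falling from 1.70 at step 1000 to roughly 1.28 by step 35000, the eval loss falling from 1.51 to 1.27 over the same range, and the learning rate decaying roughly linearly from about 2e-5 toward zero, consistent with a single-epoch run capped at max_steps = 35000.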