{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3758535919147774,
"eval_steps": 500,
"global_step": 344,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2.173913043478261e-07,
"loss": 1.7313,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.0869565217391306e-06,
"loss": 1.6804,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 2.173913043478261e-06,
"loss": 1.3363,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 3.2608695652173914e-06,
"loss": 1.1728,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 4.347826086956522e-06,
"loss": 1.085,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 5.4347826086956525e-06,
"loss": 1.0442,
"step": 25
},
{
"epoch": 0.03,
"learning_rate": 6.521739130434783e-06,
"loss": 0.9988,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 7.608695652173914e-06,
"loss": 0.9694,
"step": 35
},
{
"epoch": 0.04,
"learning_rate": 8.695652173913044e-06,
"loss": 0.9524,
"step": 40
},
{
"epoch": 0.05,
"learning_rate": 9.782608695652175e-06,
"loss": 0.9502,
"step": 45
},
{
"epoch": 0.05,
"learning_rate": 1.0869565217391305e-05,
"loss": 0.9313,
"step": 50
},
{
"epoch": 0.06,
"learning_rate": 1.1956521739130435e-05,
"loss": 0.9182,
"step": 55
},
{
"epoch": 0.07,
"learning_rate": 1.3043478260869566e-05,
"loss": 0.9174,
"step": 60
},
{
"epoch": 0.07,
"learning_rate": 1.4130434782608698e-05,
"loss": 0.9027,
"step": 65
},
{
"epoch": 0.08,
"learning_rate": 1.5217391304347828e-05,
"loss": 0.9057,
"step": 70
},
{
"epoch": 0.08,
"learning_rate": 1.630434782608696e-05,
"loss": 0.9031,
"step": 75
},
{
"epoch": 0.09,
"learning_rate": 1.739130434782609e-05,
"loss": 0.9037,
"step": 80
},
{
"epoch": 0.09,
"learning_rate": 1.847826086956522e-05,
"loss": 0.8971,
"step": 85
},
{
"epoch": 0.1,
"learning_rate": 1.956521739130435e-05,
"loss": 0.8918,
"step": 90
},
{
"epoch": 0.1,
"learning_rate": 1.999934429598561e-05,
"loss": 0.9005,
"step": 95
},
{
"epoch": 0.11,
"learning_rate": 1.9995337527295925e-05,
"loss": 0.882,
"step": 100
},
{
"epoch": 0.11,
"learning_rate": 1.9987689727712563e-05,
"loss": 0.8806,
"step": 105
},
{
"epoch": 0.12,
"learning_rate": 1.997640368312189e-05,
"loss": 0.8911,
"step": 110
},
{
"epoch": 0.13,
"learning_rate": 1.9961483504724445e-05,
"loss": 0.8899,
"step": 115
},
{
"epoch": 0.13,
"learning_rate": 1.9942934627537337e-05,
"loss": 0.8818,
"step": 120
},
{
"epoch": 0.14,
"learning_rate": 1.992076380841442e-05,
"loss": 1.566,
"step": 125
},
{
"epoch": 0.14,
"learning_rate": 1.989497912358495e-05,
"loss": 1.7017,
"step": 130
},
{
"epoch": 0.15,
"learning_rate": 1.9865589965711636e-05,
"loss": 1.2844,
"step": 135
},
{
"epoch": 0.15,
"learning_rate": 1.9832607040469147e-05,
"loss": 1.1296,
"step": 140
},
{
"epoch": 0.16,
"learning_rate": 1.9796042362644315e-05,
"loss": 1.0674,
"step": 145
},
{
"epoch": 0.16,
"learning_rate": 1.9755909251759493e-05,
"loss": 1.0187,
"step": 150
},
{
"epoch": 0.17,
"learning_rate": 1.97122223272206e-05,
"loss": 0.9856,
"step": 155
},
{
"epoch": 0.17,
"learning_rate": 1.9664997502991665e-05,
"loss": 0.9741,
"step": 160
},
{
"epoch": 0.18,
"learning_rate": 1.961425198179781e-05,
"loss": 0.9602,
"step": 165
},
{
"epoch": 0.19,
"learning_rate": 1.9560004248858754e-05,
"loss": 0.9388,
"step": 170
},
{
"epoch": 0.19,
"learning_rate": 1.950227406515516e-05,
"loss": 0.9239,
"step": 175
},
{
"epoch": 0.2,
"learning_rate": 1.9441082460230226e-05,
"loss": 0.9376,
"step": 180
},
{
"epoch": 0.2,
"learning_rate": 1.9376451724529207e-05,
"loss": 0.9257,
"step": 185
},
{
"epoch": 0.21,
"learning_rate": 1.930840540127961e-05,
"loss": 0.927,
"step": 190
},
{
"epoch": 0.21,
"learning_rate": 1.923696827791502e-05,
"loss": 0.9064,
"step": 195
},
{
"epoch": 0.22,
"learning_rate": 1.9162166377045723e-05,
"loss": 0.9088,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 1.9084026946979366e-05,
"loss": 0.9014,
"step": 205
},
{
"epoch": 0.23,
"learning_rate": 1.9002578451795133e-05,
"loss": 0.9092,
"step": 210
},
{
"epoch": 0.23,
"learning_rate": 1.8917850560975064e-05,
"loss": 0.9081,
"step": 215
},
{
"epoch": 0.24,
"learning_rate": 1.882987413859625e-05,
"loss": 0.9027,
"step": 220
},
{
"epoch": 0.25,
"learning_rate": 1.8738681232087897e-05,
"loss": 0.8962,
"step": 225
},
{
"epoch": 0.25,
"learning_rate": 1.8644305060557317e-05,
"loss": 0.8823,
"step": 230
},
{
"epoch": 0.26,
"learning_rate": 1.8546780002689088e-05,
"loss": 0.892,
"step": 235
},
{
"epoch": 0.26,
"learning_rate": 1.8446141584221854e-05,
"loss": 0.8887,
"step": 240
},
{
"epoch": 0.27,
"learning_rate": 1.834242646500724e-05,
"loss": 0.8843,
"step": 245
},
{
"epoch": 0.27,
"learning_rate": 1.8235672425655678e-05,
"loss": 0.9014,
"step": 250
},
{
"epoch": 0.28,
"learning_rate": 1.8125918353773934e-05,
"loss": 0.8911,
"step": 255
},
{
"epoch": 0.28,
"learning_rate": 1.8013204229799422e-05,
"loss": 0.881,
"step": 260
},
{
"epoch": 0.29,
"learning_rate": 1.7897571112436404e-05,
"loss": 0.8879,
"step": 265
},
{
"epoch": 0.3,
"learning_rate": 1.777906112369942e-05,
"loss": 0.8922,
"step": 270
},
{
"epoch": 0.3,
"learning_rate": 1.7657717433569384e-05,
"loss": 0.8747,
"step": 275
},
{
"epoch": 0.31,
"learning_rate": 1.7533584244267897e-05,
"loss": 0.8715,
"step": 280
},
{
"epoch": 0.31,
"learning_rate": 1.7406706774155625e-05,
"loss": 0.8812,
"step": 285
},
{
"epoch": 0.32,
"learning_rate": 1.7277131241260438e-05,
"loss": 0.8669,
"step": 290
},
{
"epoch": 0.32,
"learning_rate": 1.7144904846441434e-05,
"loss": 0.864,
"step": 295
},
{
"epoch": 0.33,
"learning_rate": 1.7010075756194962e-05,
"loss": 0.8683,
"step": 300
},
{
"epoch": 0.33,
"learning_rate": 1.6872693085108864e-05,
"loss": 0.867,
"step": 305
},
{
"epoch": 0.34,
"learning_rate": 1.673280687797135e-05,
"loss": 0.8724,
"step": 310
},
{
"epoch": 0.34,
"learning_rate": 1.659046809154105e-05,
"loss": 0.8802,
"step": 315
},
{
"epoch": 0.35,
"learning_rate": 1.6445728575984838e-05,
"loss": 0.8657,
"step": 320
},
{
"epoch": 0.36,
"learning_rate": 1.6298641055990222e-05,
"loss": 0.8602,
"step": 325
},
{
"epoch": 0.36,
"learning_rate": 1.614925911155917e-05,
"loss": 0.8641,
"step": 330
},
{
"epoch": 0.37,
"learning_rate": 1.5997637158490366e-05,
"loss": 0.8606,
"step": 335
},
{
"epoch": 0.37,
"learning_rate": 1.5843830428557e-05,
"loss": 0.8586,
"step": 340
},
{
"epoch": 0.38,
"eval_loss": 0.9133486151695251,
"eval_runtime": 211.9502,
"eval_samples_per_second": 173.763,
"eval_steps_per_second": 0.679,
"step": 344
},
{
"epoch": 0.38,
"step": 344,
"total_flos": 576369847173120.0,
"train_loss": 0.9639226873946745,
"train_runtime": 7750.2268,
"train_samples_per_second": 60.453,
"train_steps_per_second": 0.118
}
],
"logging_steps": 5,
"max_steps": 915,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 576369847173120.0,
"trial_name": null,
"trial_params": null
}