soraberta / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 375.83892617449663,
"global_step": 56000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.36,
"learning_rate": 4.956305928411633e-05,
"loss": 6.8613,
"step": 500
},
{
"epoch": 6.71,
"learning_rate": 4.912611856823266e-05,
"loss": 6.0182,
"step": 1000
},
{
"epoch": 10.07,
"learning_rate": 4.868917785234899e-05,
"loss": 5.8106,
"step": 1500
},
{
"epoch": 13.42,
"learning_rate": 4.825223713646533e-05,
"loss": 5.6038,
"step": 2000
},
{
"epoch": 16.78,
"learning_rate": 4.781529642058166e-05,
"loss": 5.3695,
"step": 2500
},
{
"epoch": 20.13,
"learning_rate": 4.737835570469799e-05,
"loss": 5.1081,
"step": 3000
},
{
"epoch": 23.49,
"learning_rate": 4.694141498881432e-05,
"loss": 4.84,
"step": 3500
},
{
"epoch": 26.85,
"learning_rate": 4.6504474272930655e-05,
"loss": 4.6172,
"step": 4000
},
{
"epoch": 30.2,
"learning_rate": 4.6067533557046985e-05,
"loss": 4.4057,
"step": 4500
},
{
"epoch": 33.56,
"learning_rate": 4.563059284116331e-05,
"loss": 4.2367,
"step": 5000
},
{
"epoch": 36.91,
"learning_rate": 4.5193652125279645e-05,
"loss": 4.0491,
"step": 5500
},
{
"epoch": 40.27,
"learning_rate": 4.4756711409395975e-05,
"loss": 3.9008,
"step": 6000
},
{
"epoch": 43.62,
"learning_rate": 4.4319770693512305e-05,
"loss": 3.7661,
"step": 6500
},
{
"epoch": 46.98,
"learning_rate": 4.3882829977628635e-05,
"loss": 3.6335,
"step": 7000
},
{
"epoch": 50.34,
"learning_rate": 4.344588926174497e-05,
"loss": 3.4885,
"step": 7500
},
{
"epoch": 53.69,
"learning_rate": 4.30089485458613e-05,
"loss": 3.377,
"step": 8000
},
{
"epoch": 57.05,
"learning_rate": 4.257200782997763e-05,
"loss": 3.275,
"step": 8500
},
{
"epoch": 60.4,
"learning_rate": 4.213506711409396e-05,
"loss": 3.1363,
"step": 9000
},
{
"epoch": 63.76,
"learning_rate": 4.169812639821029e-05,
"loss": 3.0257,
"step": 9500
},
{
"epoch": 67.11,
"learning_rate": 4.126118568232663e-05,
"loss": 2.9229,
"step": 10000
},
{
"epoch": 70.47,
"learning_rate": 4.082424496644295e-05,
"loss": 2.8369,
"step": 10500
},
{
"epoch": 73.83,
"learning_rate": 4.038730425055929e-05,
"loss": 2.7189,
"step": 11000
},
{
"epoch": 77.18,
"learning_rate": 3.995036353467562e-05,
"loss": 2.6285,
"step": 11500
},
{
"epoch": 80.54,
"learning_rate": 3.951342281879195e-05,
"loss": 2.5286,
"step": 12000
},
{
"epoch": 83.89,
"learning_rate": 3.907648210290828e-05,
"loss": 2.4418,
"step": 12500
},
{
"epoch": 87.25,
"learning_rate": 3.863954138702461e-05,
"loss": 2.3471,
"step": 13000
},
{
"epoch": 90.6,
"learning_rate": 3.8202600671140944e-05,
"loss": 2.2438,
"step": 13500
},
{
"epoch": 93.96,
"learning_rate": 3.7765659955257274e-05,
"loss": 2.1722,
"step": 14000
},
{
"epoch": 97.32,
"learning_rate": 3.7328719239373604e-05,
"loss": 2.0995,
"step": 14500
},
{
"epoch": 100.67,
"learning_rate": 3.6891778523489934e-05,
"loss": 2.0343,
"step": 15000
},
{
"epoch": 104.03,
"learning_rate": 3.645483780760627e-05,
"loss": 1.9535,
"step": 15500
},
{
"epoch": 107.38,
"learning_rate": 3.6017897091722594e-05,
"loss": 1.8755,
"step": 16000
},
{
"epoch": 110.74,
"learning_rate": 3.5580956375838924e-05,
"loss": 1.8003,
"step": 16500
},
{
"epoch": 114.09,
"learning_rate": 3.514401565995526e-05,
"loss": 1.7354,
"step": 17000
},
{
"epoch": 117.45,
"learning_rate": 3.470707494407159e-05,
"loss": 1.666,
"step": 17500
},
{
"epoch": 120.81,
"learning_rate": 3.427013422818792e-05,
"loss": 1.6116,
"step": 18000
},
{
"epoch": 124.16,
"learning_rate": 3.383319351230425e-05,
"loss": 1.5483,
"step": 18500
},
{
"epoch": 127.52,
"learning_rate": 3.339625279642059e-05,
"loss": 1.4874,
"step": 19000
},
{
"epoch": 130.87,
"learning_rate": 3.295931208053692e-05,
"loss": 1.4294,
"step": 19500
},
{
"epoch": 134.23,
"learning_rate": 3.252237136465325e-05,
"loss": 1.3839,
"step": 20000
},
{
"epoch": 137.58,
"learning_rate": 3.2085430648769577e-05,
"loss": 1.32,
"step": 20500
},
{
"epoch": 140.94,
"learning_rate": 3.1648489932885906e-05,
"loss": 1.2635,
"step": 21000
},
{
"epoch": 144.3,
"learning_rate": 3.1211549217002236e-05,
"loss": 1.2201,
"step": 21500
},
{
"epoch": 147.65,
"learning_rate": 3.0774608501118566e-05,
"loss": 1.1751,
"step": 22000
},
{
"epoch": 151.01,
"learning_rate": 3.0337667785234903e-05,
"loss": 1.1269,
"step": 22500
},
{
"epoch": 154.36,
"learning_rate": 2.990072706935123e-05,
"loss": 1.087,
"step": 23000
},
{
"epoch": 157.72,
"learning_rate": 2.9463786353467566e-05,
"loss": 1.0491,
"step": 23500
},
{
"epoch": 161.07,
"learning_rate": 2.9026845637583893e-05,
"loss": 1.0008,
"step": 24000
},
{
"epoch": 164.43,
"learning_rate": 2.8589904921700223e-05,
"loss": 0.9718,
"step": 24500
},
{
"epoch": 167.79,
"learning_rate": 2.8152964205816556e-05,
"loss": 0.9386,
"step": 25000
},
{
"epoch": 171.14,
"learning_rate": 2.7716023489932886e-05,
"loss": 0.8923,
"step": 25500
},
{
"epoch": 174.5,
"learning_rate": 2.727908277404922e-05,
"loss": 0.8642,
"step": 26000
},
{
"epoch": 177.85,
"learning_rate": 2.684214205816555e-05,
"loss": 0.8361,
"step": 26500
},
{
"epoch": 181.21,
"learning_rate": 2.6405201342281882e-05,
"loss": 0.8075,
"step": 27000
},
{
"epoch": 184.56,
"learning_rate": 2.5968260626398212e-05,
"loss": 0.7729,
"step": 27500
},
{
"epoch": 187.92,
"learning_rate": 2.5531319910514546e-05,
"loss": 0.7429,
"step": 28000
},
{
"epoch": 191.28,
"learning_rate": 2.5094379194630872e-05,
"loss": 0.7253,
"step": 28500
},
{
"epoch": 194.63,
"learning_rate": 2.4657438478747206e-05,
"loss": 0.693,
"step": 29000
},
{
"epoch": 197.99,
"learning_rate": 2.4220497762863535e-05,
"loss": 0.6717,
"step": 29500
},
{
"epoch": 201.34,
"learning_rate": 2.378355704697987e-05,
"loss": 0.648,
"step": 30000
},
{
"epoch": 204.7,
"learning_rate": 2.3346616331096195e-05,
"loss": 0.617,
"step": 30500
},
{
"epoch": 208.05,
"learning_rate": 2.290967561521253e-05,
"loss": 0.6145,
"step": 31000
},
{
"epoch": 211.41,
"learning_rate": 2.247273489932886e-05,
"loss": 0.5925,
"step": 31500
},
{
"epoch": 214.77,
"learning_rate": 2.2035794183445192e-05,
"loss": 0.574,
"step": 32000
},
{
"epoch": 218.12,
"learning_rate": 2.1598853467561522e-05,
"loss": 0.5549,
"step": 32500
},
{
"epoch": 221.48,
"learning_rate": 2.1161912751677855e-05,
"loss": 0.543,
"step": 33000
},
{
"epoch": 224.83,
"learning_rate": 2.0724972035794185e-05,
"loss": 0.5261,
"step": 33500
},
{
"epoch": 228.19,
"learning_rate": 2.0288031319910515e-05,
"loss": 0.5083,
"step": 34000
},
{
"epoch": 231.54,
"learning_rate": 1.9851090604026848e-05,
"loss": 0.4944,
"step": 34500
},
{
"epoch": 234.9,
"learning_rate": 1.9414149888143178e-05,
"loss": 0.4789,
"step": 35000
},
{
"epoch": 238.26,
"learning_rate": 1.8977209172259508e-05,
"loss": 0.4702,
"step": 35500
},
{
"epoch": 241.61,
"learning_rate": 1.8540268456375838e-05,
"loss": 0.4567,
"step": 36000
},
{
"epoch": 244.97,
"learning_rate": 1.810332774049217e-05,
"loss": 0.4417,
"step": 36500
},
{
"epoch": 248.32,
"learning_rate": 1.76663870246085e-05,
"loss": 0.4414,
"step": 37000
},
{
"epoch": 251.68,
"learning_rate": 1.7229446308724834e-05,
"loss": 0.431,
"step": 37500
},
{
"epoch": 255.03,
"learning_rate": 1.6792505592841164e-05,
"loss": 0.4134,
"step": 38000
},
{
"epoch": 258.39,
"learning_rate": 1.6355564876957498e-05,
"loss": 0.4047,
"step": 38500
},
{
"epoch": 261.74,
"learning_rate": 1.5918624161073824e-05,
"loss": 0.3894,
"step": 39000
},
{
"epoch": 265.1,
"learning_rate": 1.5481683445190158e-05,
"loss": 0.3924,
"step": 39500
},
{
"epoch": 268.46,
"learning_rate": 1.5044742729306487e-05,
"loss": 0.3782,
"step": 40000
},
{
"epoch": 271.81,
"learning_rate": 1.4607802013422819e-05,
"loss": 0.3786,
"step": 40500
},
{
"epoch": 275.17,
"learning_rate": 1.417086129753915e-05,
"loss": 0.361,
"step": 41000
},
{
"epoch": 278.52,
"learning_rate": 1.3733920581655482e-05,
"loss": 0.3588,
"step": 41500
},
{
"epoch": 281.88,
"learning_rate": 1.3296979865771814e-05,
"loss": 0.35,
"step": 42000
},
{
"epoch": 285.23,
"learning_rate": 1.2860039149888146e-05,
"loss": 0.349,
"step": 42500
},
{
"epoch": 288.59,
"learning_rate": 1.2423098434004475e-05,
"loss": 0.344,
"step": 43000
},
{
"epoch": 291.95,
"learning_rate": 1.1986157718120807e-05,
"loss": 0.3314,
"step": 43500
},
{
"epoch": 295.3,
"learning_rate": 1.1549217002237137e-05,
"loss": 0.3288,
"step": 44000
},
{
"epoch": 298.66,
"learning_rate": 1.1112276286353469e-05,
"loss": 0.3259,
"step": 44500
},
{
"epoch": 302.01,
"learning_rate": 1.06753355704698e-05,
"loss": 0.3161,
"step": 45000
},
{
"epoch": 305.37,
"learning_rate": 1.023839485458613e-05,
"loss": 0.3108,
"step": 45500
},
{
"epoch": 308.72,
"learning_rate": 9.80145413870246e-06,
"loss": 0.3141,
"step": 46000
},
{
"epoch": 312.08,
"learning_rate": 9.364513422818792e-06,
"loss": 0.3022,
"step": 46500
},
{
"epoch": 315.44,
"learning_rate": 8.927572706935123e-06,
"loss": 0.2995,
"step": 47000
},
{
"epoch": 318.79,
"learning_rate": 8.490631991051455e-06,
"loss": 0.294,
"step": 47500
},
{
"epoch": 322.15,
"learning_rate": 8.053691275167785e-06,
"loss": 0.2935,
"step": 48000
},
{
"epoch": 325.5,
"learning_rate": 7.6167505592841164e-06,
"loss": 0.2906,
"step": 48500
},
{
"epoch": 328.86,
"learning_rate": 7.179809843400448e-06,
"loss": 0.2854,
"step": 49000
},
{
"epoch": 332.21,
"learning_rate": 6.74286912751678e-06,
"loss": 0.2768,
"step": 49500
},
{
"epoch": 335.57,
"learning_rate": 6.3059284116331096e-06,
"loss": 0.2799,
"step": 50000
},
{
"epoch": 338.93,
"learning_rate": 5.868987695749441e-06,
"loss": 0.2696,
"step": 50500
},
{
"epoch": 342.28,
"learning_rate": 5.432046979865772e-06,
"loss": 0.2713,
"step": 51000
},
{
"epoch": 345.64,
"learning_rate": 4.995106263982103e-06,
"loss": 0.2651,
"step": 51500
},
{
"epoch": 348.99,
"learning_rate": 4.558165548098434e-06,
"loss": 0.2651,
"step": 52000
},
{
"epoch": 352.35,
"learning_rate": 4.121224832214765e-06,
"loss": 0.2621,
"step": 52500
},
{
"epoch": 355.7,
"learning_rate": 3.6842841163310963e-06,
"loss": 0.2671,
"step": 53000
},
{
"epoch": 359.06,
"learning_rate": 3.247343400447427e-06,
"loss": 0.2538,
"step": 53500
},
{
"epoch": 362.42,
"learning_rate": 2.8104026845637587e-06,
"loss": 0.2588,
"step": 54000
},
{
"epoch": 365.77,
"learning_rate": 2.37346196868009e-06,
"loss": 0.2522,
"step": 54500
},
{
"epoch": 369.13,
"learning_rate": 1.9365212527964206e-06,
"loss": 0.2577,
"step": 55000
},
{
"epoch": 372.48,
"learning_rate": 1.4995805369127516e-06,
"loss": 0.244,
"step": 55500
},
{
"epoch": 375.84,
"learning_rate": 1.062639821029083e-06,
"loss": 0.2492,
"step": 56000
}
],
"max_steps": 57216,
"num_train_epochs": 384,
"total_flos": 5.557730674767398e+16,
"trial_name": null,
"trial_params": null
}
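For reference, the log_history above can be inspected with a few lines of Python. The sketch below assumes the file has been downloaded locally as trainer_state.json and that matplotlib is installed; it reads the logged entries (one every 500 steps) and plots the training loss alongside the linear learning-rate decay.

import json

import matplotlib.pyplot as plt

# Load the trainer state exported by the Hugging Face Trainer.
# The path is an assumption; point it at wherever the file was saved.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Each log_history entry records the step, epoch, learning rate,
# and training loss at 500-step intervals.
history = state["log_history"]
steps = [entry["step"] for entry in history]
losses = [entry["loss"] for entry in history]
lrs = [entry["learning_rate"] for entry in history]

print(f"logged points: {len(history)}")
print(f"final step {steps[-1]}, final loss {losses[-1]:.4f}")

# Plot the loss curve and the learning-rate schedule side by side.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
fig.savefig("soraberta_training_curves.png")

Run as-is, this prints the final logged step (56000) and loss (0.2492) and writes the two curves to soraberta_training_curves.png.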