{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9868723033140548,
"global_step": 21500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 1.9545579730101903e-05,
"loss": 1.1232,
"step": 500
},
{
"epoch": 0.05,
"learning_rate": 1.9086569356467456e-05,
"loss": 0.0953,
"step": 1000
},
{
"epoch": 0.07,
"learning_rate": 1.8627558982833013e-05,
"loss": 0.0845,
"step": 1500
},
{
"epoch": 0.09,
"learning_rate": 1.816854860919857e-05,
"loss": 0.0796,
"step": 2000
},
{
"epoch": 0.11,
"learning_rate": 1.7709538235564126e-05,
"loss": 0.0796,
"step": 2500
},
{
"epoch": 0.14,
"learning_rate": 1.725052786192968e-05,
"loss": 0.078,
"step": 3000
},
{
"epoch": 0.16,
"learning_rate": 1.6791517488295236e-05,
"loss": 0.0766,
"step": 3500
},
{
"epoch": 0.18,
"learning_rate": 1.6332507114660793e-05,
"loss": 0.0742,
"step": 4000
},
{
"epoch": 0.21,
"learning_rate": 1.587349674102635e-05,
"loss": 0.0741,
"step": 4500
},
{
"epoch": 0.23,
"learning_rate": 1.5414486367391906e-05,
"loss": 0.0743,
"step": 5000
},
{
"epoch": 0.25,
"learning_rate": 1.495547599375746e-05,
"loss": 0.0738,
"step": 5500
},
{
"epoch": 0.28,
"learning_rate": 1.4496465620123017e-05,
"loss": 0.0725,
"step": 6000
},
{
"epoch": 0.3,
"learning_rate": 1.4037455246488572e-05,
"loss": 0.0728,
"step": 6500
},
{
"epoch": 0.32,
"learning_rate": 1.3578444872854129e-05,
"loss": 0.0706,
"step": 7000
},
{
"epoch": 0.34,
"learning_rate": 1.3119434499219684e-05,
"loss": 0.0697,
"step": 7500
},
{
"epoch": 0.37,
"learning_rate": 1.266042412558524e-05,
"loss": 0.0713,
"step": 8000
},
{
"epoch": 0.39,
"learning_rate": 1.2201413751950796e-05,
"loss": 0.0701,
"step": 8500
},
{
"epoch": 0.41,
"learning_rate": 1.1742403378316352e-05,
"loss": 0.07,
"step": 9000
},
{
"epoch": 0.44,
"learning_rate": 1.1283393004681905e-05,
"loss": 0.0692,
"step": 9500
},
{
"epoch": 0.46,
"learning_rate": 1.0824382631047464e-05,
"loss": 0.069,
"step": 10000
},
{
"epoch": 0.48,
"learning_rate": 1.0365372257413017e-05,
"loss": 0.0694,
"step": 10500
},
{
"epoch": 0.5,
"learning_rate": 9.906361883778575e-06,
"loss": 0.0691,
"step": 11000
},
{
"epoch": 0.53,
"learning_rate": 9.44735151014413e-06,
"loss": 0.0685,
"step": 11500
},
{
"epoch": 0.55,
"learning_rate": 8.988341136509687e-06,
"loss": 0.0682,
"step": 12000
},
{
"epoch": 0.57,
"learning_rate": 8.529330762875242e-06,
"loss": 0.0682,
"step": 12500
},
{
"epoch": 0.6,
"learning_rate": 8.070320389240799e-06,
"loss": 0.0682,
"step": 13000
},
{
"epoch": 0.62,
"learning_rate": 7.6113100156063535e-06,
"loss": 0.067,
"step": 13500
},
{
"epoch": 0.64,
"learning_rate": 7.152299641971909e-06,
"loss": 0.0669,
"step": 14000
},
{
"epoch": 0.67,
"learning_rate": 6.693289268337465e-06,
"loss": 0.066,
"step": 14500
},
{
"epoch": 0.69,
"learning_rate": 6.234278894703021e-06,
"loss": 0.0664,
"step": 15000
},
{
"epoch": 0.71,
"learning_rate": 5.776186541815846e-06,
"loss": 0.0667,
"step": 15500
},
{
"epoch": 0.73,
"learning_rate": 5.317176168181402e-06,
"loss": 0.0667,
"step": 16000
},
{
"epoch": 0.76,
"learning_rate": 4.858165794546958e-06,
"loss": 0.0671,
"step": 16500
},
{
"epoch": 0.78,
"learning_rate": 4.3991554209125135e-06,
"loss": 0.0668,
"step": 17000
},
{
"epoch": 0.8,
"learning_rate": 3.940145047278069e-06,
"loss": 0.0661,
"step": 17500
},
{
"epoch": 0.83,
"learning_rate": 3.4811346736436247e-06,
"loss": 0.0674,
"step": 18000
},
{
"epoch": 0.85,
"learning_rate": 3.0221243000091805e-06,
"loss": 0.0662,
"step": 18500
},
{
"epoch": 0.87,
"learning_rate": 2.5631139263747363e-06,
"loss": 0.067,
"step": 19000
},
{
"epoch": 0.9,
"learning_rate": 2.104103552740292e-06,
"loss": 0.0657,
"step": 19500
},
{
"epoch": 0.92,
"learning_rate": 1.645093179105848e-06,
"loss": 0.0658,
"step": 20000
},
{
"epoch": 0.94,
"learning_rate": 1.1860828054714039e-06,
"loss": 0.0663,
"step": 20500
},
{
"epoch": 0.96,
"learning_rate": 7.270724318369595e-07,
"loss": 0.0661,
"step": 21000
},
{
"epoch": 0.99,
"learning_rate": 2.6898007894978427e-07,
"loss": 0.0666,
"step": 21500
}
],
"max_steps": 21786,
"num_train_epochs": 1,
"total_flos": 4.6557579706368e+16,
"trial_name": null,
"trial_params": null
}