opt-350m-sst2 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"global_step": 21050,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 1.9527790973871736e-05,
"loss": 0.3444,
"step": 500
},
{
"epoch": 0.24,
"learning_rate": 1.905273159144893e-05,
"loss": 0.2812,
"step": 1000
},
{
"epoch": 0.36,
"learning_rate": 1.857767220902613e-05,
"loss": 0.265,
"step": 1500
},
{
"epoch": 0.48,
"learning_rate": 1.8102612826603327e-05,
"loss": 0.2375,
"step": 2000
},
{
"epoch": 0.59,
"learning_rate": 1.7628503562945368e-05,
"loss": 0.2282,
"step": 2500
},
{
"epoch": 0.71,
"learning_rate": 1.7153444180522567e-05,
"loss": 0.2192,
"step": 3000
},
{
"epoch": 0.83,
"learning_rate": 1.6678384798099765e-05,
"loss": 0.2187,
"step": 3500
},
{
"epoch": 0.95,
"learning_rate": 1.620332541567696e-05,
"loss": 0.2216,
"step": 4000
},
{
"epoch": 1.07,
"learning_rate": 1.5729216152019004e-05,
"loss": 0.1802,
"step": 4500
},
{
"epoch": 1.19,
"learning_rate": 1.5254156769596201e-05,
"loss": 0.1474,
"step": 5000
},
{
"epoch": 1.31,
"learning_rate": 1.4779097387173397e-05,
"loss": 0.144,
"step": 5500
},
{
"epoch": 1.43,
"learning_rate": 1.4304038004750596e-05,
"loss": 0.1607,
"step": 6000
},
{
"epoch": 1.54,
"learning_rate": 1.382897862232779e-05,
"loss": 0.1622,
"step": 6500
},
{
"epoch": 1.66,
"learning_rate": 1.3354869358669835e-05,
"loss": 0.1581,
"step": 7000
},
{
"epoch": 1.78,
"learning_rate": 1.2879809976247033e-05,
"loss": 0.1535,
"step": 7500
},
{
"epoch": 1.9,
"learning_rate": 1.2404750593824228e-05,
"loss": 0.1528,
"step": 8000
},
{
"epoch": 2.02,
"learning_rate": 1.1929691211401427e-05,
"loss": 0.1475,
"step": 8500
},
{
"epoch": 2.14,
"learning_rate": 1.1455581947743468e-05,
"loss": 0.1033,
"step": 9000
},
{
"epoch": 2.26,
"learning_rate": 1.0981472684085512e-05,
"loss": 0.0884,
"step": 9500
},
{
"epoch": 2.38,
"learning_rate": 1.0506413301662707e-05,
"loss": 0.102,
"step": 10000
},
{
"epoch": 2.49,
"learning_rate": 1.0031353919239906e-05,
"loss": 0.104,
"step": 10500
},
{
"epoch": 2.61,
"learning_rate": 9.556294536817104e-06,
"loss": 0.1026,
"step": 11000
},
{
"epoch": 2.73,
"learning_rate": 9.0812351543943e-06,
"loss": 0.1116,
"step": 11500
},
{
"epoch": 2.85,
"learning_rate": 8.606175771971497e-06,
"loss": 0.1002,
"step": 12000
},
{
"epoch": 2.97,
"learning_rate": 8.131116389548695e-06,
"loss": 0.0996,
"step": 12500
},
{
"epoch": 3.09,
"learning_rate": 7.656057007125892e-06,
"loss": 0.0686,
"step": 13000
},
{
"epoch": 3.21,
"learning_rate": 7.180997624703089e-06,
"loss": 0.0594,
"step": 13500
},
{
"epoch": 3.33,
"learning_rate": 6.7068883610451305e-06,
"loss": 0.0562,
"step": 14000
},
{
"epoch": 3.44,
"learning_rate": 6.231828978622329e-06,
"loss": 0.0523,
"step": 14500
},
{
"epoch": 3.56,
"learning_rate": 5.757719714964372e-06,
"loss": 0.0576,
"step": 15000
},
{
"epoch": 3.68,
"learning_rate": 5.282660332541568e-06,
"loss": 0.067,
"step": 15500
},
{
"epoch": 3.8,
"learning_rate": 4.807600950118766e-06,
"loss": 0.0664,
"step": 16000
},
{
"epoch": 3.92,
"learning_rate": 4.332541567695962e-06,
"loss": 0.064,
"step": 16500
},
{
"epoch": 4.04,
"learning_rate": 3.857482185273159e-06,
"loss": 0.0441,
"step": 17000
},
{
"epoch": 4.16,
"learning_rate": 3.382422802850356e-06,
"loss": 0.0306,
"step": 17500
},
{
"epoch": 4.28,
"learning_rate": 2.9073634204275536e-06,
"loss": 0.0287,
"step": 18000
},
{
"epoch": 4.39,
"learning_rate": 2.4323040380047506e-06,
"loss": 0.0314,
"step": 18500
},
{
"epoch": 4.51,
"learning_rate": 1.957244655581948e-06,
"loss": 0.0304,
"step": 19000
},
{
"epoch": 4.63,
"learning_rate": 1.4821852731591448e-06,
"loss": 0.0281,
"step": 19500
},
{
"epoch": 4.75,
"learning_rate": 1.007125890736342e-06,
"loss": 0.0257,
"step": 20000
},
{
"epoch": 4.87,
"learning_rate": 5.320665083135392e-07,
"loss": 0.0385,
"step": 20500
},
{
"epoch": 4.99,
"learning_rate": 5.700712589073635e-08,
"loss": 0.0246,
"step": 21000
},
{
"epoch": 5.0,
"step": 21050,
"total_flos": 7.845474691055616e+16,
"train_loss": 0.11896094931842595,
"train_runtime": 1931.7504,
"train_samples_per_second": 174.321,
"train_steps_per_second": 10.897
}
],
"max_steps": 21050,
"num_train_epochs": 5,
"total_flos": 7.845474691055616e+16,
"trial_name": null,
"trial_params": null
}
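The log above samples the training loss and learning rate every 500 optimizer steps across 21050 total steps (5 epochs of SST-2 fine-tuning), with the learning rate decaying roughly linearly from about 2e-5 toward zero; the final entry holds the run-level summary (total FLOs, mean train_loss, runtime, throughput). Below is a minimal sketch for loading and inspecting this state file, assuming it has been downloaded locally as trainer_state.json; the local path and the printed fields are illustrative assumptions, not part of the repository.

# Minimal sketch: parse the Trainer state above and pull out the logged curves.
# Assumes the JSON has been saved locally as "trainer_state.json" (hypothetical path).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step log entries carry "loss" and "learning_rate"; the last entry is the
# run-level summary (train_loss, train_runtime, throughput) and is skipped here.
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

print(f"logged points: {len(logs)} (every 500 steps, {state['max_steps']} steps total)")
print(f"first/last logged loss: {losses[0]} -> {losses[-1]}")
print(f"first/last logged learning rate: {lrs[0]:.3e} -> {lrs[-1]:.3e}")
print(f"reported mean train_loss: {state['log_history'][-1]['train_loss']}")

The near-constant drop of about 4.75e-7 in the learning rate per 500-step logging interval is consistent with a linear decay schedule from roughly 2e-5 over the full 21050 steps.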