{
"best_metric": 0.14040808379650116,
"best_model_checkpoint": "./results/checkpoint-51064",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 51064,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 1.986944487962818e-05,
"loss": 0.2496,
"step": 500
},
{
"epoch": 0.04,
"learning_rate": 1.9738889759256358e-05,
"loss": 0.208,
"step": 1000
},
{
"epoch": 0.06,
"learning_rate": 1.9608334638884537e-05,
"loss": 0.1884,
"step": 1500
},
{
"epoch": 0.08,
"learning_rate": 1.9477779518512717e-05,
"loss": 0.1955,
"step": 2000
},
{
"epoch": 0.1,
"learning_rate": 1.9347224398140897e-05,
"loss": 0.2005,
"step": 2500
},
{
"epoch": 0.12,
"learning_rate": 1.9216669277769077e-05,
"loss": 0.1625,
"step": 3000
},
{
"epoch": 0.14,
"learning_rate": 1.9086114157397256e-05,
"loss": 0.18,
"step": 3500
},
{
"epoch": 0.16,
"learning_rate": 1.8955559037025433e-05,
"loss": 0.153,
"step": 4000
},
{
"epoch": 0.18,
"learning_rate": 1.8825003916653613e-05,
"loss": 0.1996,
"step": 4500
},
{
"epoch": 0.2,
"learning_rate": 1.8694448796281792e-05,
"loss": 0.1643,
"step": 5000
},
{
"epoch": 0.22,
"learning_rate": 1.856389367590997e-05,
"loss": 0.2022,
"step": 5500
},
{
"epoch": 0.23,
"learning_rate": 1.843333855553815e-05,
"loss": 0.1706,
"step": 6000
},
{
"epoch": 0.25,
"learning_rate": 1.8302783435166328e-05,
"loss": 0.1895,
"step": 6500
},
{
"epoch": 0.27,
"learning_rate": 1.8172228314794508e-05,
"loss": 0.1733,
"step": 7000
},
{
"epoch": 0.29,
"learning_rate": 1.8041673194422688e-05,
"loss": 0.1938,
"step": 7500
},
{
"epoch": 0.31,
"learning_rate": 1.7911118074050867e-05,
"loss": 0.1786,
"step": 8000
},
{
"epoch": 0.33,
"learning_rate": 1.7780562953679047e-05,
"loss": 0.19,
"step": 8500
},
{
"epoch": 0.35,
"learning_rate": 1.7650007833307223e-05,
"loss": 0.1908,
"step": 9000
},
{
"epoch": 0.37,
"learning_rate": 1.7519452712935403e-05,
"loss": 0.1755,
"step": 9500
},
{
"epoch": 0.39,
"learning_rate": 1.7388897592563583e-05,
"loss": 0.1536,
"step": 10000
},
{
"epoch": 0.41,
"learning_rate": 1.725834247219176e-05,
"loss": 0.1574,
"step": 10500
},
{
"epoch": 0.43,
"learning_rate": 1.712778735181994e-05,
"loss": 0.1472,
"step": 11000
},
{
"epoch": 0.45,
"learning_rate": 1.699723223144812e-05,
"loss": 0.1936,
"step": 11500
},
{
"epoch": 0.47,
"learning_rate": 1.68666771110763e-05,
"loss": 0.1746,
"step": 12000
},
{
"epoch": 0.49,
"learning_rate": 1.6736121990704478e-05,
"loss": 0.1731,
"step": 12500
},
{
"epoch": 0.51,
"learning_rate": 1.6605566870332658e-05,
"loss": 0.1927,
"step": 13000
},
{
"epoch": 0.53,
"learning_rate": 1.6475011749960834e-05,
"loss": 0.1946,
"step": 13500
},
{
"epoch": 0.55,
"learning_rate": 1.6344456629589014e-05,
"loss": 0.1636,
"step": 14000
},
{
"epoch": 0.57,
"learning_rate": 1.6213901509217194e-05,
"loss": 0.2385,
"step": 14500
},
{
"epoch": 0.59,
"learning_rate": 1.608334638884537e-05,
"loss": 0.2137,
"step": 15000
},
{
"epoch": 0.61,
"learning_rate": 1.595279126847355e-05,
"loss": 0.1736,
"step": 15500
},
{
"epoch": 0.63,
"learning_rate": 1.582223614810173e-05,
"loss": 0.2196,
"step": 16000
},
{
"epoch": 0.65,
"learning_rate": 1.569168102772991e-05,
"loss": 0.1878,
"step": 16500
},
{
"epoch": 0.67,
"learning_rate": 1.556112590735809e-05,
"loss": 0.1742,
"step": 17000
},
{
"epoch": 0.69,
"learning_rate": 1.543057078698627e-05,
"loss": 0.1907,
"step": 17500
},
{
"epoch": 0.7,
"learning_rate": 1.5300015666614445e-05,
"loss": 0.2048,
"step": 18000
},
{
"epoch": 0.72,
"learning_rate": 1.5169460546242625e-05,
"loss": 0.1777,
"step": 18500
},
{
"epoch": 0.74,
"learning_rate": 1.5038905425870805e-05,
"loss": 0.213,
"step": 19000
},
{
"epoch": 0.76,
"learning_rate": 1.4908350305498983e-05,
"loss": 0.1941,
"step": 19500
},
{
"epoch": 0.78,
"learning_rate": 1.4777795185127163e-05,
"loss": 0.1525,
"step": 20000
},
{
"epoch": 0.8,
"learning_rate": 1.4647240064755342e-05,
"loss": 0.1638,
"step": 20500
},
{
"epoch": 0.82,
"learning_rate": 1.4516684944383519e-05,
"loss": 0.174,
"step": 21000
},
{
"epoch": 0.84,
"learning_rate": 1.4386129824011698e-05,
"loss": 0.1693,
"step": 21500
},
{
"epoch": 0.86,
"learning_rate": 1.4255574703639878e-05,
"loss": 0.1926,
"step": 22000
},
{
"epoch": 0.88,
"learning_rate": 1.4125019583268056e-05,
"loss": 0.1915,
"step": 22500
},
{
"epoch": 0.9,
"learning_rate": 1.3994464462896236e-05,
"loss": 0.1671,
"step": 23000
},
{
"epoch": 0.92,
"learning_rate": 1.3863909342524416e-05,
"loss": 0.1572,
"step": 23500
},
{
"epoch": 0.94,
"learning_rate": 1.3733354222152594e-05,
"loss": 0.1613,
"step": 24000
},
{
"epoch": 0.96,
"learning_rate": 1.3602799101780773e-05,
"loss": 0.165,
"step": 24500
},
{
"epoch": 0.98,
"learning_rate": 1.3472243981408951e-05,
"loss": 0.1739,
"step": 25000
},
{
"epoch": 1.0,
"learning_rate": 1.3341688861037131e-05,
"loss": 0.1611,
"step": 25500
},
{
"epoch": 1.0,
"eval_loss": 0.17089270055294037,
"eval_runtime": 899.1592,
"eval_samples_per_second": 35.494,
"eval_steps_per_second": 7.099,
"step": 25532
},
{
"epoch": 1.02,
"learning_rate": 1.321113374066531e-05,
"loss": 0.1776,
"step": 26000
},
{
"epoch": 1.04,
"learning_rate": 1.3080578620293489e-05,
"loss": 0.1759,
"step": 26500
},
{
"epoch": 1.06,
"learning_rate": 1.2950023499921669e-05,
"loss": 0.1544,
"step": 27000
},
{
"epoch": 1.08,
"learning_rate": 1.2819468379549847e-05,
"loss": 0.1691,
"step": 27500
},
{
"epoch": 1.1,
"learning_rate": 1.2688913259178027e-05,
"loss": 0.1866,
"step": 28000
},
{
"epoch": 1.12,
"learning_rate": 1.2558358138806206e-05,
"loss": 0.1833,
"step": 28500
},
{
"epoch": 1.14,
"learning_rate": 1.2427803018434383e-05,
"loss": 0.1791,
"step": 29000
},
{
"epoch": 1.16,
"learning_rate": 1.2297247898062562e-05,
"loss": 0.173,
"step": 29500
},
{
"epoch": 1.17,
"learning_rate": 1.2166692777690742e-05,
"loss": 0.2536,
"step": 30000
},
{
"epoch": 1.19,
"learning_rate": 1.203613765731892e-05,
"loss": 0.2705,
"step": 30500
},
{
"epoch": 1.21,
"learning_rate": 1.19055825369471e-05,
"loss": 0.2602,
"step": 31000
},
{
"epoch": 1.23,
"learning_rate": 1.177502741657528e-05,
"loss": 0.1628,
"step": 31500
},
{
"epoch": 1.25,
"learning_rate": 1.1644472296203458e-05,
"loss": 0.249,
"step": 32000
},
{
"epoch": 1.27,
"learning_rate": 1.1513917175831637e-05,
"loss": 0.2991,
"step": 32500
},
{
"epoch": 1.29,
"learning_rate": 1.1383362055459817e-05,
"loss": 0.3618,
"step": 33000
},
{
"epoch": 1.31,
"learning_rate": 1.1252806935087994e-05,
"loss": 0.3487,
"step": 33500
},
{
"epoch": 1.33,
"learning_rate": 1.1122251814716173e-05,
"loss": 0.3179,
"step": 34000
},
{
"epoch": 1.35,
"learning_rate": 1.0991696694344353e-05,
"loss": 0.3457,
"step": 34500
},
{
"epoch": 1.37,
"learning_rate": 1.0861141573972531e-05,
"loss": 0.3574,
"step": 35000
},
{
"epoch": 1.39,
"learning_rate": 1.073058645360071e-05,
"loss": 0.3486,
"step": 35500
},
{
"epoch": 1.41,
"learning_rate": 1.060003133322889e-05,
"loss": 0.2576,
"step": 36000
},
{
"epoch": 1.43,
"learning_rate": 1.0469476212857069e-05,
"loss": 0.1753,
"step": 36500
},
{
"epoch": 1.45,
"learning_rate": 1.0338921092485248e-05,
"loss": 0.2012,
"step": 37000
},
{
"epoch": 1.47,
"learning_rate": 1.0208365972113428e-05,
"loss": 0.2095,
"step": 37500
},
{
"epoch": 1.49,
"learning_rate": 1.0077810851741604e-05,
"loss": 0.1966,
"step": 38000
},
{
"epoch": 1.51,
"learning_rate": 9.947255731369784e-06,
"loss": 0.1492,
"step": 38500
},
{
"epoch": 1.53,
"learning_rate": 9.816700610997964e-06,
"loss": 0.1663,
"step": 39000
},
{
"epoch": 1.55,
"learning_rate": 9.686145490626144e-06,
"loss": 0.158,
"step": 39500
},
{
"epoch": 1.57,
"learning_rate": 9.555590370254322e-06,
"loss": 0.1635,
"step": 40000
},
{
"epoch": 1.59,
"learning_rate": 9.425035249882501e-06,
"loss": 0.1605,
"step": 40500
},
{
"epoch": 1.61,
"learning_rate": 9.29448012951068e-06,
"loss": 0.1479,
"step": 41000
},
{
"epoch": 1.63,
"learning_rate": 9.16392500913886e-06,
"loss": 0.1555,
"step": 41500
},
{
"epoch": 1.64,
"learning_rate": 9.033369888767039e-06,
"loss": 0.1657,
"step": 42000
},
{
"epoch": 1.66,
"learning_rate": 8.902814768395217e-06,
"loss": 0.1322,
"step": 42500
},
{
"epoch": 1.68,
"learning_rate": 8.772259648023395e-06,
"loss": 0.1441,
"step": 43000
},
{
"epoch": 1.7,
"learning_rate": 8.641704527651575e-06,
"loss": 0.1514,
"step": 43500
},
{
"epoch": 1.72,
"learning_rate": 8.511149407279755e-06,
"loss": 0.1556,
"step": 44000
},
{
"epoch": 1.74,
"learning_rate": 8.380594286907934e-06,
"loss": 0.2028,
"step": 44500
},
{
"epoch": 1.76,
"learning_rate": 8.250039166536112e-06,
"loss": 0.1538,
"step": 45000
},
{
"epoch": 1.78,
"learning_rate": 8.11948404616429e-06,
"loss": 0.1804,
"step": 45500
},
{
"epoch": 1.8,
"learning_rate": 7.98892892579247e-06,
"loss": 0.146,
"step": 46000
},
{
"epoch": 1.82,
"learning_rate": 7.85837380542065e-06,
"loss": 0.1538,
"step": 46500
},
{
"epoch": 1.84,
"learning_rate": 7.727818685048828e-06,
"loss": 0.1476,
"step": 47000
},
{
"epoch": 1.86,
"learning_rate": 7.597263564677008e-06,
"loss": 0.1766,
"step": 47500
},
{
"epoch": 1.88,
"learning_rate": 7.4667084443051865e-06,
"loss": 0.1807,
"step": 48000
},
{
"epoch": 1.9,
"learning_rate": 7.336153323933365e-06,
"loss": 0.1416,
"step": 48500
},
{
"epoch": 1.92,
"learning_rate": 7.205598203561544e-06,
"loss": 0.1453,
"step": 49000
},
{
"epoch": 1.94,
"learning_rate": 7.075043083189723e-06,
"loss": 0.1349,
"step": 49500
},
{
"epoch": 1.96,
"learning_rate": 6.944487962817902e-06,
"loss": 0.1852,
"step": 50000
},
{
"epoch": 1.98,
"learning_rate": 6.813932842446082e-06,
"loss": 0.1599,
"step": 50500
},
{
"epoch": 2.0,
"learning_rate": 6.68337772207426e-06,
"loss": 0.1184,
"step": 51000
},
{
"epoch": 2.0,
"eval_loss": 0.14040808379650116,
"eval_runtime": 896.823,
"eval_samples_per_second": 35.587,
"eval_steps_per_second": 7.117,
"step": 51064
}
],
"logging_steps": 500,
"max_steps": 76596,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 6.717540976607232e+16,
"trial_name": null,
"trial_params": null
}