byt5_en_swa_news / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"global_step": 30790,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"learning_rate": 4.91880480675544e-05,
"loss": 1.2994,
"step": 500
},
{
"epoch": 0.32,
"learning_rate": 4.8376096135108804e-05,
"loss": 0.9413,
"step": 1000
},
{
"epoch": 0.49,
"learning_rate": 4.7564144202663205e-05,
"loss": 0.8341,
"step": 1500
},
{
"epoch": 0.65,
"learning_rate": 4.6752192270217606e-05,
"loss": 0.7782,
"step": 2000
},
{
"epoch": 0.81,
"learning_rate": 4.5940240337772006e-05,
"loss": 0.742,
"step": 2500
},
{
"epoch": 0.97,
"learning_rate": 4.512828840532641e-05,
"loss": 0.7083,
"step": 3000
},
{
"epoch": 1.14,
"learning_rate": 4.431633647288081e-05,
"loss": 0.6759,
"step": 3500
},
{
"epoch": 1.3,
"learning_rate": 4.350438454043521e-05,
"loss": 0.6592,
"step": 4000
},
{
"epoch": 1.46,
"learning_rate": 4.269243260798961e-05,
"loss": 0.6393,
"step": 4500
},
{
"epoch": 1.62,
"learning_rate": 4.188048067554401e-05,
"loss": 0.6344,
"step": 5000
},
{
"epoch": 1.79,
"learning_rate": 4.106852874309841e-05,
"loss": 0.6215,
"step": 5500
},
{
"epoch": 1.95,
"learning_rate": 4.025657681065281e-05,
"loss": 0.6102,
"step": 6000
},
{
"epoch": 2.11,
"learning_rate": 3.944462487820721e-05,
"loss": 0.5853,
"step": 6500
},
{
"epoch": 2.27,
"learning_rate": 3.8632672945761614e-05,
"loss": 0.5797,
"step": 7000
},
{
"epoch": 2.44,
"learning_rate": 3.7820721013316015e-05,
"loss": 0.5706,
"step": 7500
},
{
"epoch": 2.6,
"learning_rate": 3.7008769080870415e-05,
"loss": 0.5662,
"step": 8000
},
{
"epoch": 2.76,
"learning_rate": 3.6196817148424816e-05,
"loss": 0.5566,
"step": 8500
},
{
"epoch": 2.92,
"learning_rate": 3.538486521597922e-05,
"loss": 0.5524,
"step": 9000
},
{
"epoch": 3.09,
"learning_rate": 3.457291328353362e-05,
"loss": 0.5395,
"step": 9500
},
{
"epoch": 3.25,
"learning_rate": 3.376096135108802e-05,
"loss": 0.5301,
"step": 10000
},
{
"epoch": 3.41,
"learning_rate": 3.294900941864242e-05,
"loss": 0.5243,
"step": 10500
},
{
"epoch": 3.57,
"learning_rate": 3.213705748619682e-05,
"loss": 0.5158,
"step": 11000
},
{
"epoch": 3.73,
"learning_rate": 3.132510555375122e-05,
"loss": 0.5198,
"step": 11500
},
{
"epoch": 3.9,
"learning_rate": 3.051315362130562e-05,
"loss": 0.5149,
"step": 12000
},
{
"epoch": 4.06,
"learning_rate": 2.970120168886002e-05,
"loss": 0.5072,
"step": 12500
},
{
"epoch": 4.22,
"learning_rate": 2.888924975641442e-05,
"loss": 0.4906,
"step": 13000
},
{
"epoch": 4.38,
"learning_rate": 2.8077297823968824e-05,
"loss": 0.4912,
"step": 13500
},
{
"epoch": 4.55,
"learning_rate": 2.7265345891523225e-05,
"loss": 0.4925,
"step": 14000
},
{
"epoch": 4.71,
"learning_rate": 2.6453393959077623e-05,
"loss": 0.4888,
"step": 14500
},
{
"epoch": 4.87,
"learning_rate": 2.5641442026632023e-05,
"loss": 0.496,
"step": 15000
},
{
"epoch": 5.03,
"learning_rate": 2.4829490094186424e-05,
"loss": 0.48,
"step": 15500
},
{
"epoch": 5.2,
"learning_rate": 2.401753816174083e-05,
"loss": 0.4752,
"step": 16000
},
{
"epoch": 5.36,
"learning_rate": 2.3205586229295226e-05,
"loss": 0.4684,
"step": 16500
},
{
"epoch": 5.52,
"learning_rate": 2.2393634296849627e-05,
"loss": 0.4664,
"step": 17000
},
{
"epoch": 5.68,
"learning_rate": 2.158168236440403e-05,
"loss": 0.4607,
"step": 17500
},
{
"epoch": 5.85,
"learning_rate": 2.0769730431958428e-05,
"loss": 0.4637,
"step": 18000
},
{
"epoch": 6.01,
"learning_rate": 1.995777849951283e-05,
"loss": 0.4694,
"step": 18500
},
{
"epoch": 6.17,
"learning_rate": 1.914582656706723e-05,
"loss": 0.4538,
"step": 19000
},
{
"epoch": 6.33,
"learning_rate": 1.833387463462163e-05,
"loss": 0.4471,
"step": 19500
},
{
"epoch": 6.5,
"learning_rate": 1.752192270217603e-05,
"loss": 0.4489,
"step": 20000
},
{
"epoch": 6.66,
"learning_rate": 1.6709970769730432e-05,
"loss": 0.4543,
"step": 20500
},
{
"epoch": 6.82,
"learning_rate": 1.5898018837284833e-05,
"loss": 0.4454,
"step": 21000
},
{
"epoch": 6.98,
"learning_rate": 1.5086066904839236e-05,
"loss": 0.4469,
"step": 21500
},
{
"epoch": 7.15,
"learning_rate": 1.4274114972393635e-05,
"loss": 0.4375,
"step": 22000
},
{
"epoch": 7.31,
"learning_rate": 1.3462163039948036e-05,
"loss": 0.4331,
"step": 22500
},
{
"epoch": 7.47,
"learning_rate": 1.2650211107502435e-05,
"loss": 0.4328,
"step": 23000
},
{
"epoch": 7.63,
"learning_rate": 1.1838259175056837e-05,
"loss": 0.437,
"step": 23500
},
{
"epoch": 7.79,
"learning_rate": 1.1026307242611238e-05,
"loss": 0.436,
"step": 24000
},
{
"epoch": 7.96,
"learning_rate": 1.0214355310165639e-05,
"loss": 0.4364,
"step": 24500
},
{
"epoch": 8.12,
"learning_rate": 9.40240337772004e-06,
"loss": 0.4322,
"step": 25000
},
{
"epoch": 8.28,
"learning_rate": 8.59045144527444e-06,
"loss": 0.426,
"step": 25500
},
{
"epoch": 8.44,
"learning_rate": 7.778499512828841e-06,
"loss": 0.4264,
"step": 26000
},
{
"epoch": 8.61,
"learning_rate": 6.966547580383241e-06,
"loss": 0.4277,
"step": 26500
},
{
"epoch": 8.77,
"learning_rate": 6.154595647937642e-06,
"loss": 0.4265,
"step": 27000
},
{
"epoch": 8.93,
"learning_rate": 5.342643715492044e-06,
"loss": 0.4236,
"step": 27500
},
{
"epoch": 9.09,
"learning_rate": 4.530691783046444e-06,
"loss": 0.4192,
"step": 28000
},
{
"epoch": 9.26,
"learning_rate": 3.718739850600845e-06,
"loss": 0.4207,
"step": 28500
},
{
"epoch": 9.42,
"learning_rate": 2.9067879181552453e-06,
"loss": 0.422,
"step": 29000
},
{
"epoch": 9.58,
"learning_rate": 2.094835985709646e-06,
"loss": 0.4173,
"step": 29500
},
{
"epoch": 9.74,
"learning_rate": 1.2828840532640467e-06,
"loss": 0.4216,
"step": 30000
},
{
"epoch": 9.91,
"learning_rate": 4.7093212081844755e-07,
"loss": 0.4216,
"step": 30500
},
{
"epoch": 10.0,
"step": 30790,
"total_flos": 4.219299386873119e+17,
"train_loss": 0.5291411468912232,
"train_runtime": 23740.7554,
"train_samples_per_second": 12.966,
"train_steps_per_second": 1.297
}
],
"max_steps": 30790,
"num_train_epochs": 10,
"total_flos": 4.219299386873119e+17,
"trial_name": null,
"trial_params": null
}
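
A minimal sketch of how the log_history above could be consumed, assuming the file is saved locally as trainer_state.json and matplotlib is available; the path and output filename are illustrative only:

import json

import matplotlib.pyplot as plt

# Load the trainer state (path is an assumption; point it at the actual file).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only the periodic logging entries that report a training loss;
# the final summary entry uses "train_loss" instead and is skipped.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

# Plot training loss against optimizer step.
plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("byt5_en_swa_news training loss")
plt.savefig("loss_curve.png", dpi=150)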