t5_base-qg-ap-test / trainer_state.json
{
"best_metric": 1.1012225151062012,
"best_model_checkpoint": "/temp/t5_base-qg-ap-test/checkpoint-6464",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 8080,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"learning_rate": 9.876237623762377e-05,
"loss": 1.9841,
"step": 100
},
{
"epoch": 0.12,
"learning_rate": 9.752475247524753e-05,
"loss": 1.2569,
"step": 200
},
{
"epoch": 0.19,
"learning_rate": 9.628712871287129e-05,
"loss": 1.2054,
"step": 300
},
{
"epoch": 0.25,
"learning_rate": 9.504950495049505e-05,
"loss": 1.1704,
"step": 400
},
{
"epoch": 0.31,
"learning_rate": 9.381188118811881e-05,
"loss": 1.1504,
"step": 500
},
{
"epoch": 0.37,
"learning_rate": 9.257425742574259e-05,
"loss": 1.14,
"step": 600
},
{
"epoch": 0.43,
"learning_rate": 9.133663366336635e-05,
"loss": 1.1303,
"step": 700
},
{
"epoch": 0.5,
"learning_rate": 9.009900990099011e-05,
"loss": 1.1022,
"step": 800
},
{
"epoch": 0.56,
"learning_rate": 8.886138613861387e-05,
"loss": 1.0985,
"step": 900
},
{
"epoch": 0.62,
"learning_rate": 8.762376237623763e-05,
"loss": 1.0944,
"step": 1000
},
{
"epoch": 0.68,
"learning_rate": 8.638613861386139e-05,
"loss": 1.0762,
"step": 1100
},
{
"epoch": 0.74,
"learning_rate": 8.514851485148515e-05,
"loss": 1.0784,
"step": 1200
},
{
"epoch": 0.8,
"learning_rate": 8.391089108910891e-05,
"loss": 1.076,
"step": 1300
},
{
"epoch": 0.87,
"learning_rate": 8.267326732673268e-05,
"loss": 1.0672,
"step": 1400
},
{
"epoch": 0.93,
"learning_rate": 8.143564356435644e-05,
"loss": 1.0673,
"step": 1500
},
{
"epoch": 0.99,
"learning_rate": 8.019801980198021e-05,
"loss": 1.0561,
"step": 1600
},
{
"epoch": 1.0,
"eval_loss": 1.1370612382888794,
"eval_runtime": 102.4778,
"eval_samples_per_second": 35.481,
"eval_steps_per_second": 8.87,
"step": 1616
},
{
"epoch": 1.05,
"learning_rate": 7.896039603960397e-05,
"loss": 0.989,
"step": 1700
},
{
"epoch": 1.11,
"learning_rate": 7.772277227722773e-05,
"loss": 0.9881,
"step": 1800
},
{
"epoch": 1.18,
"learning_rate": 7.64851485148515e-05,
"loss": 0.987,
"step": 1900
},
{
"epoch": 1.24,
"learning_rate": 7.524752475247526e-05,
"loss": 0.9849,
"step": 2000
},
{
"epoch": 1.3,
"learning_rate": 7.400990099009902e-05,
"loss": 0.9807,
"step": 2100
},
{
"epoch": 1.36,
"learning_rate": 7.277227722772278e-05,
"loss": 0.9784,
"step": 2200
},
{
"epoch": 1.42,
"learning_rate": 7.153465346534654e-05,
"loss": 0.9868,
"step": 2300
},
{
"epoch": 1.49,
"learning_rate": 7.02970297029703e-05,
"loss": 0.9792,
"step": 2400
},
{
"epoch": 1.55,
"learning_rate": 6.905940594059406e-05,
"loss": 0.988,
"step": 2500
},
{
"epoch": 1.61,
"learning_rate": 6.782178217821783e-05,
"loss": 0.9806,
"step": 2600
},
{
"epoch": 1.67,
"learning_rate": 6.658415841584159e-05,
"loss": 0.9612,
"step": 2700
},
{
"epoch": 1.73,
"learning_rate": 6.534653465346535e-05,
"loss": 0.9632,
"step": 2800
},
{
"epoch": 1.79,
"learning_rate": 6.410891089108911e-05,
"loss": 0.9861,
"step": 2900
},
{
"epoch": 1.86,
"learning_rate": 6.287128712871287e-05,
"loss": 0.968,
"step": 3000
},
{
"epoch": 1.92,
"learning_rate": 6.163366336633663e-05,
"loss": 0.9727,
"step": 3100
},
{
"epoch": 1.98,
"learning_rate": 6.03960396039604e-05,
"loss": 0.9695,
"step": 3200
},
{
"epoch": 2.0,
"eval_loss": 1.1089671850204468,
"eval_runtime": 102.4537,
"eval_samples_per_second": 35.489,
"eval_steps_per_second": 8.872,
"step": 3232
},
{
"epoch": 2.04,
"learning_rate": 5.915841584158416e-05,
"loss": 0.9333,
"step": 3300
},
{
"epoch": 2.1,
"learning_rate": 5.792079207920792e-05,
"loss": 0.9114,
"step": 3400
},
{
"epoch": 2.17,
"learning_rate": 5.668316831683168e-05,
"loss": 0.9173,
"step": 3500
},
{
"epoch": 2.23,
"learning_rate": 5.544554455445545e-05,
"loss": 0.9131,
"step": 3600
},
{
"epoch": 2.29,
"learning_rate": 5.420792079207921e-05,
"loss": 0.9064,
"step": 3700
},
{
"epoch": 2.35,
"learning_rate": 5.2970297029702974e-05,
"loss": 0.9113,
"step": 3800
},
{
"epoch": 2.41,
"learning_rate": 5.1732673267326735e-05,
"loss": 0.8984,
"step": 3900
},
{
"epoch": 2.48,
"learning_rate": 5.0495049504950497e-05,
"loss": 0.9149,
"step": 4000
},
{
"epoch": 2.54,
"learning_rate": 4.925742574257426e-05,
"loss": 0.9041,
"step": 4100
},
{
"epoch": 2.6,
"learning_rate": 4.801980198019802e-05,
"loss": 0.9137,
"step": 4200
},
{
"epoch": 2.66,
"learning_rate": 4.678217821782179e-05,
"loss": 0.9117,
"step": 4300
},
{
"epoch": 2.72,
"learning_rate": 4.554455445544555e-05,
"loss": 0.9024,
"step": 4400
},
{
"epoch": 2.78,
"learning_rate": 4.430693069306931e-05,
"loss": 0.914,
"step": 4500
},
{
"epoch": 2.85,
"learning_rate": 4.306930693069307e-05,
"loss": 0.9295,
"step": 4600
},
{
"epoch": 2.91,
"learning_rate": 4.183168316831683e-05,
"loss": 0.9088,
"step": 4700
},
{
"epoch": 2.97,
"learning_rate": 4.05940594059406e-05,
"loss": 0.9111,
"step": 4800
},
{
"epoch": 3.0,
"eval_loss": 1.101236343383789,
"eval_runtime": 102.4816,
"eval_samples_per_second": 35.48,
"eval_steps_per_second": 8.87,
"step": 4848
},
{
"epoch": 3.03,
"learning_rate": 3.935643564356436e-05,
"loss": 0.8905,
"step": 4900
},
{
"epoch": 3.09,
"learning_rate": 3.811881188118812e-05,
"loss": 0.8628,
"step": 5000
},
{
"epoch": 3.16,
"learning_rate": 3.6881188118811884e-05,
"loss": 0.8712,
"step": 5100
},
{
"epoch": 3.22,
"learning_rate": 3.5643564356435645e-05,
"loss": 0.8545,
"step": 5200
},
{
"epoch": 3.28,
"learning_rate": 3.440594059405941e-05,
"loss": 0.8793,
"step": 5300
},
{
"epoch": 3.34,
"learning_rate": 3.3168316831683175e-05,
"loss": 0.8677,
"step": 5400
},
{
"epoch": 3.4,
"learning_rate": 3.1930693069306936e-05,
"loss": 0.8716,
"step": 5500
},
{
"epoch": 3.47,
"learning_rate": 3.06930693069307e-05,
"loss": 0.8708,
"step": 5600
},
{
"epoch": 3.53,
"learning_rate": 2.9455445544554455e-05,
"loss": 0.8782,
"step": 5700
},
{
"epoch": 3.59,
"learning_rate": 2.8217821782178216e-05,
"loss": 0.8651,
"step": 5800
},
{
"epoch": 3.65,
"learning_rate": 2.6980198019801985e-05,
"loss": 0.8597,
"step": 5900
},
{
"epoch": 3.71,
"learning_rate": 2.5742574257425746e-05,
"loss": 0.8516,
"step": 6000
},
{
"epoch": 3.77,
"learning_rate": 2.4504950495049507e-05,
"loss": 0.8759,
"step": 6100
},
{
"epoch": 3.84,
"learning_rate": 2.326732673267327e-05,
"loss": 0.8631,
"step": 6200
},
{
"epoch": 3.9,
"learning_rate": 2.202970297029703e-05,
"loss": 0.8636,
"step": 6300
},
{
"epoch": 3.96,
"learning_rate": 2.079207920792079e-05,
"loss": 0.8691,
"step": 6400
},
{
"epoch": 4.0,
"eval_loss": 1.1012225151062012,
"eval_runtime": 102.0015,
"eval_samples_per_second": 35.647,
"eval_steps_per_second": 8.912,
"step": 6464
},
{
"epoch": 4.02,
"learning_rate": 1.9554455445544556e-05,
"loss": 0.8573,
"step": 6500
},
{
"epoch": 4.08,
"learning_rate": 1.8316831683168317e-05,
"loss": 0.8536,
"step": 6600
},
{
"epoch": 4.15,
"learning_rate": 1.707920792079208e-05,
"loss": 0.8329,
"step": 6700
},
{
"epoch": 4.21,
"learning_rate": 1.5841584158415843e-05,
"loss": 0.8407,
"step": 6800
},
{
"epoch": 4.27,
"learning_rate": 1.4603960396039604e-05,
"loss": 0.8525,
"step": 6900
},
{
"epoch": 4.33,
"learning_rate": 1.3366336633663367e-05,
"loss": 0.8473,
"step": 7000
},
{
"epoch": 4.39,
"learning_rate": 1.2128712871287128e-05,
"loss": 0.825,
"step": 7100
},
{
"epoch": 4.46,
"learning_rate": 1.0891089108910891e-05,
"loss": 0.834,
"step": 7200
},
{
"epoch": 4.52,
"learning_rate": 9.653465346534654e-06,
"loss": 0.8397,
"step": 7300
},
{
"epoch": 4.58,
"learning_rate": 8.415841584158417e-06,
"loss": 0.8436,
"step": 7400
},
{
"epoch": 4.64,
"learning_rate": 7.178217821782178e-06,
"loss": 0.8353,
"step": 7500
},
{
"epoch": 4.7,
"learning_rate": 5.940594059405941e-06,
"loss": 0.8332,
"step": 7600
},
{
"epoch": 4.76,
"learning_rate": 4.702970297029704e-06,
"loss": 0.8402,
"step": 7700
},
{
"epoch": 4.83,
"learning_rate": 3.4653465346534657e-06,
"loss": 0.8526,
"step": 7800
},
{
"epoch": 4.89,
"learning_rate": 2.227722772277228e-06,
"loss": 0.827,
"step": 7900
},
{
"epoch": 4.95,
"learning_rate": 9.900990099009902e-07,
"loss": 0.8543,
"step": 8000
},
{
"epoch": 5.0,
"eval_loss": 1.103607177734375,
"eval_runtime": 102.3147,
"eval_samples_per_second": 35.537,
"eval_steps_per_second": 8.884,
"step": 8080
},
{
"epoch": 5.0,
"step": 8080,
"total_flos": 1.574277938675712e+17,
"train_loss": 0.9537819692403964,
"train_runtime": 26215.2524,
"train_samples_per_second": 9.861,
"train_steps_per_second": 0.308
}
],
"logging_steps": 100,
"max_steps": 8080,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1.574277938675712e+17,
"trial_name": null,
"trial_params": null
}
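
A minimal sketch (not part of the original file) for inspecting this trainer state with the Python standard library: it assumes the JSON above is saved locally as trainer_state.json, then prints the best checkpoint info and the eval_loss logged at the end of each epoch. Only keys that actually appear in the file (best_metric, best_model_checkpoint, log_history, epoch, step, eval_loss) are used.

import json

# Load the trainer state dumped by the Transformers Trainer.
# The local filename "trainer_state.json" is an assumption.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best_metric: {state['best_metric']}")
print(f"best_model_checkpoint: {state['best_model_checkpoint']}")

# log_history mixes training-loss entries and per-epoch eval entries;
# only the latter carry an "eval_loss" key.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:.1f}  step {entry['step']}  "
              f"eval_loss {entry['eval_loss']:.4f}")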