{
"best_metric": 1.5882539749145508,
"best_model_checkpoint": "/temp/mt5_base-qg-aas-oficial/checkpoint-6930",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 6930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 9.855699855699856e-05,
"loss": 10.3838,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 9.711399711399713e-05,
"loss": 3.4535,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 9.567099567099568e-05,
"loss": 2.5236,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 9.422799422799424e-05,
"loss": 2.3734,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 9.278499278499279e-05,
"loss": 2.2729,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 9.134199134199136e-05,
"loss": 2.2424,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 8.98989898989899e-05,
"loss": 2.157,
"step": 700
},
{
"epoch": 0.58,
"learning_rate": 8.845598845598845e-05,
"loss": 2.1577,
"step": 800
},
{
"epoch": 0.65,
"learning_rate": 8.701298701298701e-05,
"loss": 2.0695,
"step": 900
},
{
"epoch": 0.72,
"learning_rate": 8.556998556998557e-05,
"loss": 2.0293,
"step": 1000
},
{
"epoch": 0.79,
"learning_rate": 8.412698412698413e-05,
"loss": 2.0455,
"step": 1100
},
{
"epoch": 0.87,
"learning_rate": 8.268398268398268e-05,
"loss": 2.0116,
"step": 1200
},
{
"epoch": 0.94,
"learning_rate": 8.124098124098124e-05,
"loss": 1.9679,
"step": 1300
},
{
"epoch": 1.0,
"eval_loss": 1.6873297691345215,
"eval_runtime": 32.2612,
"eval_samples_per_second": 196.118,
"eval_steps_per_second": 3.069,
"step": 1386
},
{
"epoch": 1.01,
"learning_rate": 7.97979797979798e-05,
"loss": 1.9474,
"step": 1400
},
{
"epoch": 1.08,
"learning_rate": 7.835497835497836e-05,
"loss": 1.8847,
"step": 1500
},
{
"epoch": 1.15,
"learning_rate": 7.691197691197691e-05,
"loss": 1.8713,
"step": 1600
},
{
"epoch": 1.23,
"learning_rate": 7.546897546897548e-05,
"loss": 1.8745,
"step": 1700
},
{
"epoch": 1.3,
"learning_rate": 7.402597402597404e-05,
"loss": 1.8253,
"step": 1800
},
{
"epoch": 1.37,
"learning_rate": 7.258297258297259e-05,
"loss": 1.8577,
"step": 1900
},
{
"epoch": 1.44,
"learning_rate": 7.113997113997114e-05,
"loss": 1.8153,
"step": 2000
},
{
"epoch": 1.52,
"learning_rate": 6.96969696969697e-05,
"loss": 1.8273,
"step": 2100
},
{
"epoch": 1.59,
"learning_rate": 6.825396825396825e-05,
"loss": 1.8135,
"step": 2200
},
{
"epoch": 1.66,
"learning_rate": 6.681096681096681e-05,
"loss": 1.8156,
"step": 2300
},
{
"epoch": 1.73,
"learning_rate": 6.536796536796536e-05,
"loss": 1.8122,
"step": 2400
},
{
"epoch": 1.8,
"learning_rate": 6.392496392496393e-05,
"loss": 1.8016,
"step": 2500
},
{
"epoch": 1.88,
"learning_rate": 6.248196248196248e-05,
"loss": 1.8018,
"step": 2600
},
{
"epoch": 1.95,
"learning_rate": 6.103896103896104e-05,
"loss": 1.7971,
"step": 2700
},
{
"epoch": 2.0,
"eval_loss": 1.6266478300094604,
"eval_runtime": 32.2462,
"eval_samples_per_second": 196.209,
"eval_steps_per_second": 3.07,
"step": 2772
},
{
"epoch": 2.02,
"learning_rate": 5.959595959595959e-05,
"loss": 1.7425,
"step": 2800
},
{
"epoch": 2.09,
"learning_rate": 5.815295815295816e-05,
"loss": 1.7245,
"step": 2900
},
{
"epoch": 2.16,
"learning_rate": 5.6709956709956715e-05,
"loss": 1.6993,
"step": 3000
},
{
"epoch": 2.24,
"learning_rate": 5.526695526695527e-05,
"loss": 1.7235,
"step": 3100
},
{
"epoch": 2.31,
"learning_rate": 5.382395382395382e-05,
"loss": 1.7153,
"step": 3200
},
{
"epoch": 2.38,
"learning_rate": 5.2380952380952384e-05,
"loss": 1.6814,
"step": 3300
},
{
"epoch": 2.45,
"learning_rate": 5.093795093795094e-05,
"loss": 1.6935,
"step": 3400
},
{
"epoch": 2.53,
"learning_rate": 4.94949494949495e-05,
"loss": 1.6631,
"step": 3500
},
{
"epoch": 2.6,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.6955,
"step": 3600
},
{
"epoch": 2.67,
"learning_rate": 4.6608946608946615e-05,
"loss": 1.681,
"step": 3700
},
{
"epoch": 2.74,
"learning_rate": 4.516594516594517e-05,
"loss": 1.6911,
"step": 3800
},
{
"epoch": 2.81,
"learning_rate": 4.3722943722943724e-05,
"loss": 1.6962,
"step": 3900
},
{
"epoch": 2.89,
"learning_rate": 4.227994227994228e-05,
"loss": 1.6871,
"step": 4000
},
{
"epoch": 2.96,
"learning_rate": 4.083694083694084e-05,
"loss": 1.6798,
"step": 4100
},
{
"epoch": 3.0,
"eval_loss": 1.5993900299072266,
"eval_runtime": 32.2465,
"eval_samples_per_second": 196.207,
"eval_steps_per_second": 3.07,
"step": 4158
},
{
"epoch": 3.03,
"learning_rate": 3.939393939393939e-05,
"loss": 1.6521,
"step": 4200
},
{
"epoch": 3.1,
"learning_rate": 3.7950937950937954e-05,
"loss": 1.6179,
"step": 4300
},
{
"epoch": 3.17,
"learning_rate": 3.650793650793651e-05,
"loss": 1.6063,
"step": 4400
},
{
"epoch": 3.25,
"learning_rate": 3.506493506493507e-05,
"loss": 1.625,
"step": 4500
},
{
"epoch": 3.32,
"learning_rate": 3.3621933621933624e-05,
"loss": 1.6208,
"step": 4600
},
{
"epoch": 3.39,
"learning_rate": 3.217893217893218e-05,
"loss": 1.6281,
"step": 4700
},
{
"epoch": 3.46,
"learning_rate": 3.073593073593073e-05,
"loss": 1.6049,
"step": 4800
},
{
"epoch": 3.54,
"learning_rate": 2.9292929292929294e-05,
"loss": 1.5794,
"step": 4900
},
{
"epoch": 3.61,
"learning_rate": 2.7849927849927855e-05,
"loss": 1.6161,
"step": 5000
},
{
"epoch": 3.68,
"learning_rate": 2.640692640692641e-05,
"loss": 1.611,
"step": 5100
},
{
"epoch": 3.75,
"learning_rate": 2.4963924963924963e-05,
"loss": 1.6126,
"step": 5200
},
{
"epoch": 3.82,
"learning_rate": 2.352092352092352e-05,
"loss": 1.6125,
"step": 5300
},
{
"epoch": 3.9,
"learning_rate": 2.207792207792208e-05,
"loss": 1.6168,
"step": 5400
},
{
"epoch": 3.97,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.6261,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 1.5885429382324219,
"eval_runtime": 32.2346,
"eval_samples_per_second": 196.28,
"eval_steps_per_second": 3.071,
"step": 5544
},
{
"epoch": 4.04,
"learning_rate": 1.919191919191919e-05,
"loss": 1.572,
"step": 5600
},
{
"epoch": 4.11,
"learning_rate": 1.7748917748917752e-05,
"loss": 1.5608,
"step": 5700
},
{
"epoch": 4.18,
"learning_rate": 1.630591630591631e-05,
"loss": 1.5693,
"step": 5800
},
{
"epoch": 4.26,
"learning_rate": 1.4862914862914865e-05,
"loss": 1.5761,
"step": 5900
},
{
"epoch": 4.33,
"learning_rate": 1.3419913419913421e-05,
"loss": 1.5722,
"step": 6000
},
{
"epoch": 4.4,
"learning_rate": 1.1976911976911977e-05,
"loss": 1.5814,
"step": 6100
},
{
"epoch": 4.47,
"learning_rate": 1.0533910533910535e-05,
"loss": 1.5865,
"step": 6200
},
{
"epoch": 4.55,
"learning_rate": 9.090909090909091e-06,
"loss": 1.5635,
"step": 6300
},
{
"epoch": 4.62,
"learning_rate": 7.647907647907649e-06,
"loss": 1.5789,
"step": 6400
},
{
"epoch": 4.69,
"learning_rate": 6.204906204906205e-06,
"loss": 1.5731,
"step": 6500
},
{
"epoch": 4.76,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.5402,
"step": 6600
},
{
"epoch": 4.83,
"learning_rate": 3.318903318903319e-06,
"loss": 1.5828,
"step": 6700
},
{
"epoch": 4.91,
"learning_rate": 1.875901875901876e-06,
"loss": 1.5648,
"step": 6800
},
{
"epoch": 4.98,
"learning_rate": 4.329004329004329e-07,
"loss": 1.5887,
"step": 6900
},
{
"epoch": 5.0,
"eval_loss": 1.5882539749145508,
"eval_runtime": 32.2402,
"eval_samples_per_second": 196.246,
"eval_steps_per_second": 3.071,
"step": 6930
},
{
"epoch": 5.0,
"step": 6930,
"total_flos": 2.6579255601659904e+17,
"train_loss": 1.9092482425089694,
"train_runtime": 4641.0606,
"train_samples_per_second": 47.763,
"train_steps_per_second": 1.493
}
],
"logging_steps": 100,
"max_steps": 6930,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 2.6579255601659904e+17,
"trial_name": null,
"trial_params": null
}