{
"best_metric": 1.0870490074157715,
"best_model_checkpoint": "/temp/mt5_base-qg-aap-oficial/checkpoint-6930",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 6930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 9.855699855699856e-05,
"loss": 6.7594,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 9.711399711399713e-05,
"loss": 2.4073,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 9.567099567099568e-05,
"loss": 2.0998,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 9.422799422799424e-05,
"loss": 2.0129,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 9.278499278499279e-05,
"loss": 1.9592,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 9.134199134199136e-05,
"loss": 1.9332,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 8.98989898989899e-05,
"loss": 1.8863,
"step": 700
},
{
"epoch": 0.58,
"learning_rate": 8.845598845598845e-05,
"loss": 1.8697,
"step": 800
},
{
"epoch": 0.65,
"learning_rate": 8.701298701298701e-05,
"loss": 1.8081,
"step": 900
},
{
"epoch": 0.72,
"learning_rate": 8.556998556998557e-05,
"loss": 1.7747,
"step": 1000
},
{
"epoch": 0.79,
"learning_rate": 8.412698412698413e-05,
"loss": 1.7912,
"step": 1100
},
{
"epoch": 0.87,
"learning_rate": 8.268398268398268e-05,
"loss": 1.7363,
"step": 1200
},
{
"epoch": 0.94,
"learning_rate": 8.124098124098124e-05,
"loss": 1.7144,
"step": 1300
},
{
"epoch": 1.0,
"eval_loss": 1.331040382385254,
"eval_runtime": 230.6439,
"eval_samples_per_second": 195.605,
"eval_steps_per_second": 3.057,
"step": 1386
},
{
"epoch": 1.01,
"learning_rate": 7.97979797979798e-05,
"loss": 1.6992,
"step": 1400
},
{
"epoch": 1.08,
"learning_rate": 7.835497835497836e-05,
"loss": 1.6193,
"step": 1500
},
{
"epoch": 1.15,
"learning_rate": 7.691197691197691e-05,
"loss": 1.6147,
"step": 1600
},
{
"epoch": 1.23,
"learning_rate": 7.546897546897548e-05,
"loss": 1.6101,
"step": 1700
},
{
"epoch": 1.3,
"learning_rate": 7.402597402597404e-05,
"loss": 1.5736,
"step": 1800
},
{
"epoch": 1.37,
"learning_rate": 7.258297258297259e-05,
"loss": 1.5945,
"step": 1900
},
{
"epoch": 1.44,
"learning_rate": 7.113997113997114e-05,
"loss": 1.5614,
"step": 2000
},
{
"epoch": 1.52,
"learning_rate": 6.96969696969697e-05,
"loss": 1.5883,
"step": 2100
},
{
"epoch": 1.59,
"learning_rate": 6.825396825396825e-05,
"loss": 1.5573,
"step": 2200
},
{
"epoch": 1.66,
"learning_rate": 6.681096681096681e-05,
"loss": 1.5644,
"step": 2300
},
{
"epoch": 1.73,
"learning_rate": 6.536796536796536e-05,
"loss": 1.5579,
"step": 2400
},
{
"epoch": 1.8,
"learning_rate": 6.392496392496393e-05,
"loss": 1.5614,
"step": 2500
},
{
"epoch": 1.88,
"learning_rate": 6.248196248196248e-05,
"loss": 1.5716,
"step": 2600
},
{
"epoch": 1.95,
"learning_rate": 6.103896103896104e-05,
"loss": 1.5556,
"step": 2700
},
{
"epoch": 2.0,
"eval_loss": 1.2120455503463745,
"eval_runtime": 230.4979,
"eval_samples_per_second": 195.728,
"eval_steps_per_second": 3.059,
"step": 2772
},
{
"epoch": 2.02,
"learning_rate": 5.959595959595959e-05,
"loss": 1.5086,
"step": 2800
},
{
"epoch": 2.09,
"learning_rate": 5.815295815295816e-05,
"loss": 1.4864,
"step": 2900
},
{
"epoch": 2.16,
"learning_rate": 5.6709956709956715e-05,
"loss": 1.4518,
"step": 3000
},
{
"epoch": 2.24,
"learning_rate": 5.526695526695527e-05,
"loss": 1.4648,
"step": 3100
},
{
"epoch": 2.31,
"learning_rate": 5.382395382395382e-05,
"loss": 1.4743,
"step": 3200
},
{
"epoch": 2.38,
"learning_rate": 5.2380952380952384e-05,
"loss": 1.4411,
"step": 3300
},
{
"epoch": 2.45,
"learning_rate": 5.093795093795094e-05,
"loss": 1.4554,
"step": 3400
},
{
"epoch": 2.53,
"learning_rate": 4.94949494949495e-05,
"loss": 1.4301,
"step": 3500
},
{
"epoch": 2.6,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.4611,
"step": 3600
},
{
"epoch": 2.67,
"learning_rate": 4.6608946608946615e-05,
"loss": 1.4457,
"step": 3700
},
{
"epoch": 2.74,
"learning_rate": 4.516594516594517e-05,
"loss": 1.4518,
"step": 3800
},
{
"epoch": 2.81,
"learning_rate": 4.3722943722943724e-05,
"loss": 1.4482,
"step": 3900
},
{
"epoch": 2.89,
"learning_rate": 4.227994227994228e-05,
"loss": 1.4529,
"step": 4000
},
{
"epoch": 2.96,
"learning_rate": 4.083694083694084e-05,
"loss": 1.4427,
"step": 4100
},
{
"epoch": 3.0,
"eval_loss": 1.1406371593475342,
"eval_runtime": 230.3581,
"eval_samples_per_second": 195.847,
"eval_steps_per_second": 3.06,
"step": 4158
},
{
"epoch": 3.03,
"learning_rate": 3.939393939393939e-05,
"loss": 1.4129,
"step": 4200
},
{
"epoch": 3.1,
"learning_rate": 3.7950937950937954e-05,
"loss": 1.3668,
"step": 4300
},
{
"epoch": 3.17,
"learning_rate": 3.650793650793651e-05,
"loss": 1.368,
"step": 4400
},
{
"epoch": 3.25,
"learning_rate": 3.506493506493507e-05,
"loss": 1.3795,
"step": 4500
},
{
"epoch": 3.32,
"learning_rate": 3.3621933621933624e-05,
"loss": 1.3774,
"step": 4600
},
{
"epoch": 3.39,
"learning_rate": 3.217893217893218e-05,
"loss": 1.3944,
"step": 4700
},
{
"epoch": 3.46,
"learning_rate": 3.073593073593073e-05,
"loss": 1.3734,
"step": 4800
},
{
"epoch": 3.54,
"learning_rate": 2.9292929292929294e-05,
"loss": 1.3481,
"step": 4900
},
{
"epoch": 3.61,
"learning_rate": 2.7849927849927855e-05,
"loss": 1.3878,
"step": 5000
},
{
"epoch": 3.68,
"learning_rate": 2.640692640692641e-05,
"loss": 1.3746,
"step": 5100
},
{
"epoch": 3.75,
"learning_rate": 2.4963924963924963e-05,
"loss": 1.3806,
"step": 5200
},
{
"epoch": 3.82,
"learning_rate": 2.352092352092352e-05,
"loss": 1.3676,
"step": 5300
},
{
"epoch": 3.9,
"learning_rate": 2.207792207792208e-05,
"loss": 1.3832,
"step": 5400
},
{
"epoch": 3.97,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.3878,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 1.0971444845199585,
"eval_runtime": 230.5739,
"eval_samples_per_second": 195.664,
"eval_steps_per_second": 3.058,
"step": 5544
},
{
"epoch": 4.04,
"learning_rate": 1.919191919191919e-05,
"loss": 1.3415,
"step": 5600
},
{
"epoch": 4.11,
"learning_rate": 1.7748917748917752e-05,
"loss": 1.3339,
"step": 5700
},
{
"epoch": 4.18,
"learning_rate": 1.630591630591631e-05,
"loss": 1.3263,
"step": 5800
},
{
"epoch": 4.26,
"learning_rate": 1.4862914862914865e-05,
"loss": 1.3384,
"step": 5900
},
{
"epoch": 4.33,
"learning_rate": 1.3419913419913421e-05,
"loss": 1.3405,
"step": 6000
},
{
"epoch": 4.4,
"learning_rate": 1.1976911976911977e-05,
"loss": 1.344,
"step": 6100
},
{
"epoch": 4.47,
"learning_rate": 1.0533910533910535e-05,
"loss": 1.3368,
"step": 6200
},
{
"epoch": 4.55,
"learning_rate": 9.090909090909091e-06,
"loss": 1.3334,
"step": 6300
},
{
"epoch": 4.62,
"learning_rate": 7.647907647907649e-06,
"loss": 1.3418,
"step": 6400
},
{
"epoch": 4.69,
"learning_rate": 6.204906204906205e-06,
"loss": 1.3358,
"step": 6500
},
{
"epoch": 4.76,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.3286,
"step": 6600
},
{
"epoch": 4.83,
"learning_rate": 3.318903318903319e-06,
"loss": 1.3322,
"step": 6700
},
{
"epoch": 4.91,
"learning_rate": 1.875901875901876e-06,
"loss": 1.3383,
"step": 6800
},
{
"epoch": 4.98,
"learning_rate": 4.329004329004329e-07,
"loss": 1.3626,
"step": 6900
},
{
"epoch": 5.0,
"eval_loss": 1.0870490074157715,
"eval_runtime": 230.5855,
"eval_samples_per_second": 195.654,
"eval_steps_per_second": 3.057,
"step": 6930
},
{
"epoch": 5.0,
"step": 6930,
"total_flos": 2.6579255601659904e+17,
"train_loss": 1.5991850755259416,
"train_runtime": 5659.5127,
"train_samples_per_second": 39.168,
"train_steps_per_second": 1.224
}
],
"logging_steps": 100,
"max_steps": 6930,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 2.6579255601659904e+17,
"trial_name": null,
"trial_params": null
}