indo-t5-base-nusax / trainer_state.json
Commit 05e1349: "End of training" (author: w11wo)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.024096385542169,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 4e-05,
"loss": 6.9946,
"step": 10
},
{
"epoch": 0.24,
"learning_rate": 8e-05,
"loss": 5.455,
"step": 20
},
{
"epoch": 0.36,
"learning_rate": 0.00012,
"loss": 4.45,
"step": 30
},
{
"epoch": 0.48,
"learning_rate": 0.00016,
"loss": 3.8135,
"step": 40
},
{
"epoch": 0.6,
"learning_rate": 0.0002,
"loss": 3.4829,
"step": 50
},
{
"epoch": 0.72,
"learning_rate": 0.00019555555555555556,
"loss": 3.1942,
"step": 60
},
{
"epoch": 0.84,
"learning_rate": 0.00019111111111111114,
"loss": 2.9794,
"step": 70
},
{
"epoch": 0.96,
"learning_rate": 0.0001866666666666667,
"loss": 2.8266,
"step": 80
},
{
"epoch": 1.08,
"learning_rate": 0.00018222222222222224,
"loss": 2.6337,
"step": 90
},
{
"epoch": 1.2,
"learning_rate": 0.00017777777777777779,
"loss": 2.5671,
"step": 100
},
{
"epoch": 1.33,
"learning_rate": 0.00017333333333333334,
"loss": 2.4814,
"step": 110
},
{
"epoch": 1.45,
"learning_rate": 0.00016888888888888889,
"loss": 2.4243,
"step": 120
},
{
"epoch": 1.57,
"learning_rate": 0.00016444444444444444,
"loss": 2.3516,
"step": 130
},
{
"epoch": 1.69,
"learning_rate": 0.00016,
"loss": 2.3141,
"step": 140
},
{
"epoch": 1.81,
"learning_rate": 0.00015555555555555556,
"loss": 2.2503,
"step": 150
},
{
"epoch": 1.93,
"learning_rate": 0.0001511111111111111,
"loss": 2.1908,
"step": 160
},
{
"epoch": 2.05,
"learning_rate": 0.00014666666666666666,
"loss": 2.1537,
"step": 170
},
{
"epoch": 2.17,
"learning_rate": 0.00014222222222222224,
"loss": 2.0442,
"step": 180
},
{
"epoch": 2.29,
"learning_rate": 0.0001377777777777778,
"loss": 2.0096,
"step": 190
},
{
"epoch": 2.41,
"learning_rate": 0.00013333333333333334,
"loss": 1.9876,
"step": 200
},
{
"epoch": 2.53,
"learning_rate": 0.00012888888888888892,
"loss": 1.9789,
"step": 210
},
{
"epoch": 2.65,
"learning_rate": 0.00012444444444444444,
"loss": 1.9375,
"step": 220
},
{
"epoch": 2.77,
"learning_rate": 0.00012,
"loss": 1.9222,
"step": 230
},
{
"epoch": 2.89,
"learning_rate": 0.00011555555555555555,
"loss": 1.8738,
"step": 240
},
{
"epoch": 3.01,
"learning_rate": 0.00011111111111111112,
"loss": 1.8951,
"step": 250
},
{
"epoch": 3.01,
"eval_bleu": 3.9246,
"eval_gen_len": 18.6281,
"eval_loss": 2.265472412109375,
"eval_runtime": 47.3348,
"eval_samples_per_second": 88.73,
"eval_steps_per_second": 0.697,
"step": 250
},
{
"epoch": 3.13,
"learning_rate": 0.00010666666666666667,
"loss": 1.7962,
"step": 260
},
{
"epoch": 3.25,
"learning_rate": 0.00010222222222222222,
"loss": 1.7742,
"step": 270
},
{
"epoch": 3.37,
"learning_rate": 9.777777777777778e-05,
"loss": 1.7561,
"step": 280
},
{
"epoch": 3.49,
"learning_rate": 9.333333333333334e-05,
"loss": 1.7248,
"step": 290
},
{
"epoch": 3.61,
"learning_rate": 8.888888888888889e-05,
"loss": 1.7343,
"step": 300
},
{
"epoch": 3.73,
"learning_rate": 8.444444444444444e-05,
"loss": 1.7053,
"step": 310
},
{
"epoch": 3.86,
"learning_rate": 8e-05,
"loss": 1.6866,
"step": 320
},
{
"epoch": 3.98,
"learning_rate": 7.555555555555556e-05,
"loss": 1.665,
"step": 330
},
{
"epoch": 4.1,
"learning_rate": 7.111111111111112e-05,
"loss": 1.6531,
"step": 340
},
{
"epoch": 4.22,
"learning_rate": 6.666666666666667e-05,
"loss": 1.615,
"step": 350
},
{
"epoch": 4.34,
"learning_rate": 6.222222222222222e-05,
"loss": 1.6093,
"step": 360
},
{
"epoch": 4.46,
"learning_rate": 5.7777777777777776e-05,
"loss": 1.6087,
"step": 370
},
{
"epoch": 4.58,
"learning_rate": 5.333333333333333e-05,
"loss": 1.586,
"step": 380
},
{
"epoch": 4.7,
"learning_rate": 4.888888888888889e-05,
"loss": 1.5655,
"step": 390
},
{
"epoch": 4.82,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.5485,
"step": 400
},
{
"epoch": 4.94,
"learning_rate": 4e-05,
"loss": 1.582,
"step": 410
},
{
"epoch": 5.06,
"learning_rate": 3.555555555555556e-05,
"loss": 1.5616,
"step": 420
},
{
"epoch": 5.18,
"learning_rate": 3.111111111111111e-05,
"loss": 1.5579,
"step": 430
},
{
"epoch": 5.3,
"learning_rate": 2.6666666666666667e-05,
"loss": 1.5261,
"step": 440
},
{
"epoch": 5.42,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.5194,
"step": 450
},
{
"epoch": 5.54,
"learning_rate": 1.777777777777778e-05,
"loss": 1.5099,
"step": 460
},
{
"epoch": 5.66,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.5227,
"step": 470
},
{
"epoch": 5.78,
"learning_rate": 8.88888888888889e-06,
"loss": 1.4967,
"step": 480
},
{
"epoch": 5.9,
"learning_rate": 4.444444444444445e-06,
"loss": 1.5215,
"step": 490
},
{
"epoch": 6.02,
"learning_rate": 0.0,
"loss": 1.5435,
"step": 500
},
{
"epoch": 6.02,
"eval_bleu": 4.1139,
"eval_gen_len": 18.6052,
"eval_loss": 2.260429859161377,
"eval_runtime": 18.0892,
"eval_samples_per_second": 232.183,
"eval_steps_per_second": 1.824,
"step": 500
},
{
"epoch": 6.02,
"step": 500,
"total_flos": 3.792343111041024e+16,
"train_loss": 2.211637247085571,
"train_runtime": 368.6992,
"train_samples_per_second": 347.166,
"train_steps_per_second": 1.356
}
],
"max_steps": 500,
"num_train_epochs": 7,
"total_flos": 3.792343111041024e+16,
"trial_name": null,
"trial_params": null
}
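
For reference, a minimal sketch (not part of the original file) of how the log above could be inspected, assuming the JSON is saved locally as "trainer_state.json" and that matplotlib is available; the entries carrying a "loss" key are the per-step training logs, while the "eval_*" and "train_*" keys belong to the evaluation and final-summary entries.

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the entries that carry a training loss; evaluation and summary
# entries use different keys ("eval_loss", "train_loss") and are skipped.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_logs]
losses = [entry["loss"] for entry in train_logs]

# Plot the training-loss curve over the 500 optimization steps.
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("indo-t5-base-nusax fine-tuning loss")
plt.show()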