{
"best_metric": 0.7737660794694032,
"best_model_checkpoint": "2-en-pl-1-wikispan-unsup-ensemble-last-64-128-3e-5-2600/checkpoint-2501",
"epoch": 1.0,
"global_step": 2501,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 1.5e-05,
"loss": 34.2531,
"step": 100
},
{
"epoch": 0.08,
"learning_rate": 3e-05,
"loss": 21.6722,
"step": 200
},
{
"epoch": 0.12,
"learning_rate": 2.875e-05,
"loss": 20.8957,
"step": 300
},
{
"epoch": 0.16,
"learning_rate": 2.75e-05,
"loss": 18.1782,
"step": 400
},
{
"epoch": 0.2,
"learning_rate": 2.625e-05,
"loss": 17.6666,
"step": 500
},
{
"epoch": 0.24,
"learning_rate": 2.5e-05,
"loss": 17.1346,
"step": 600
},
{
"epoch": 0.28,
"learning_rate": 2.3749999999999998e-05,
"loss": 15.6721,
"step": 700
},
{
"epoch": 0.32,
"learning_rate": 2.25e-05,
"loss": 15.1408,
"step": 800
},
{
"epoch": 0.36,
"learning_rate": 2.125e-05,
"loss": 14.8461,
"step": 900
},
{
"epoch": 0.4,
"learning_rate": 1.9999999999999998e-05,
"loss": 14.7333,
"step": 1000
},
{
"epoch": 0.44,
"learning_rate": 1.8750000000000002e-05,
"loss": 13.9118,
"step": 1100
},
{
"epoch": 0.48,
"learning_rate": 1.7500000000000002e-05,
"loss": 13.1635,
"step": 1200
},
{
"epoch": 0.52,
"learning_rate": 1.625e-05,
"loss": 12.9291,
"step": 1300
},
{
"epoch": 0.56,
"learning_rate": 1.5e-05,
"loss": 13.5323,
"step": 1400
},
{
"epoch": 0.6,
"learning_rate": 1.375e-05,
"loss": 12.2758,
"step": 1500
},
{
"epoch": 0.64,
"learning_rate": 1.25e-05,
"loss": 12.1152,
"step": 1600
},
{
"epoch": 0.68,
"learning_rate": 1.125e-05,
"loss": 11.9529,
"step": 1700
},
{
"epoch": 0.72,
"learning_rate": 9.999999999999999e-06,
"loss": 11.7075,
"step": 1800
},
{
"epoch": 0.76,
"learning_rate": 8.750000000000001e-06,
"loss": 11.4577,
"step": 1900
},
{
"epoch": 0.8,
"learning_rate": 7.5e-06,
"loss": 10.5324,
"step": 2000
},
{
"epoch": 0.84,
"learning_rate": 6.25e-06,
"loss": 11.4755,
"step": 2100
},
{
"epoch": 0.88,
"learning_rate": 4.9999999999999996e-06,
"loss": 10.8383,
"step": 2200
},
{
"epoch": 0.92,
"learning_rate": 3.75e-06,
"loss": 11.2571,
"step": 2300
},
{
"epoch": 0.96,
"learning_rate": 2.4999999999999998e-06,
"loss": 10.9548,
"step": 2400
},
{
"epoch": 1.0,
"learning_rate": 1.2499999999999999e-06,
"loss": 11.0557,
"step": 2500
}
],
"max_steps": 2600,
"num_train_epochs": 2,
"total_flos": 165708798492672.0,
"trial_name": null,
"trial_params": null
}