codes/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.8979591836734695,
"eval_steps": 500,
"global_step": 90,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"learning_rate": 5e-05,
"loss": 0.9113,
"step": 2
},
{
"epoch": 0.22,
"learning_rate": 5e-05,
"loss": 0.7869,
"step": 4
},
{
"epoch": 0.33,
"learning_rate": 5e-05,
"loss": 0.7093,
"step": 6
},
{
"epoch": 0.44,
"learning_rate": 5e-05,
"loss": 0.6995,
"step": 8
},
{
"epoch": 0.54,
"learning_rate": 5e-05,
"loss": 0.688,
"step": 10
},
{
"epoch": 0.65,
"learning_rate": 5e-05,
"loss": 0.6936,
"step": 12
},
{
"epoch": 0.76,
"learning_rate": 5e-05,
"loss": 0.6948,
"step": 14
},
{
"epoch": 0.87,
"learning_rate": 5e-05,
"loss": 0.7126,
"step": 16
},
{
"epoch": 0.98,
"learning_rate": 5e-05,
"loss": 0.6552,
"step": 18
},
{
"epoch": 1.09,
"learning_rate": 5e-05,
"loss": 0.5185,
"step": 20
},
{
"epoch": 1.2,
"learning_rate": 5e-05,
"loss": 0.4545,
"step": 22
},
{
"epoch": 1.31,
"learning_rate": 5e-05,
"loss": 0.4339,
"step": 24
},
{
"epoch": 1.41,
"learning_rate": 5e-05,
"loss": 0.4015,
"step": 26
},
{
"epoch": 1.52,
"learning_rate": 5e-05,
"loss": 0.3998,
"step": 28
},
{
"epoch": 1.63,
"learning_rate": 5e-05,
"loss": 0.4085,
"step": 30
},
{
"epoch": 1.74,
"learning_rate": 5e-05,
"loss": 0.3968,
"step": 32
},
{
"epoch": 1.85,
"learning_rate": 5e-05,
"loss": 0.3707,
"step": 34
},
{
"epoch": 1.96,
"learning_rate": 5e-05,
"loss": 0.3904,
"step": 36
},
{
"epoch": 2.07,
"learning_rate": 5e-05,
"loss": 0.2891,
"step": 38
},
{
"epoch": 2.18,
"learning_rate": 5e-05,
"loss": 0.2091,
"step": 40
},
{
"epoch": 2.29,
"learning_rate": 5e-05,
"loss": 0.1942,
"step": 42
},
{
"epoch": 2.39,
"learning_rate": 5e-05,
"loss": 0.188,
"step": 44
},
{
"epoch": 2.5,
"learning_rate": 5e-05,
"loss": 0.1921,
"step": 46
},
{
"epoch": 2.61,
"learning_rate": 5e-05,
"loss": 0.1812,
"step": 48
},
{
"epoch": 2.72,
"learning_rate": 5e-05,
"loss": 0.1746,
"step": 50
},
{
"epoch": 2.83,
"learning_rate": 5e-05,
"loss": 0.1867,
"step": 52
},
{
"epoch": 2.94,
"learning_rate": 5e-05,
"loss": 0.1831,
"step": 54
},
{
"epoch": 3.05,
"learning_rate": 5e-05,
"loss": 0.1302,
"step": 56
},
{
"epoch": 3.16,
"learning_rate": 5e-05,
"loss": 0.0841,
"step": 58
},
{
"epoch": 3.27,
"learning_rate": 5e-05,
"loss": 0.0747,
"step": 60
},
{
"epoch": 3.37,
"learning_rate": 5e-05,
"loss": 0.0754,
"step": 62
},
{
"epoch": 3.48,
"learning_rate": 5e-05,
"loss": 0.075,
"step": 64
},
{
"epoch": 3.59,
"learning_rate": 5e-05,
"loss": 0.0763,
"step": 66
},
{
"epoch": 3.7,
"learning_rate": 5e-05,
"loss": 0.0761,
"step": 68
},
{
"epoch": 3.81,
"learning_rate": 5e-05,
"loss": 0.0789,
"step": 70
},
{
"epoch": 3.92,
"learning_rate": 5e-05,
"loss": 0.0757,
"step": 72
},
{
"epoch": 4.03,
"learning_rate": 5e-05,
"loss": 0.0757,
"step": 74
},
{
"epoch": 4.14,
"learning_rate": 5e-05,
"loss": 0.0337,
"step": 76
},
{
"epoch": 4.24,
"learning_rate": 5e-05,
"loss": 0.0377,
"step": 78
},
{
"epoch": 4.35,
"learning_rate": 5e-05,
"loss": 0.0373,
"step": 80
},
{
"epoch": 4.46,
"learning_rate": 5e-05,
"loss": 0.0388,
"step": 82
},
{
"epoch": 4.57,
"learning_rate": 5e-05,
"loss": 0.0433,
"step": 84
},
{
"epoch": 4.68,
"learning_rate": 5e-05,
"loss": 0.04,
"step": 86
},
{
"epoch": 4.79,
"learning_rate": 5e-05,
"loss": 0.0487,
"step": 88
},
{
"epoch": 4.9,
"learning_rate": 5e-05,
"loss": 0.0499,
"step": 90
}
],
"logging_steps": 2,
"max_steps": 90,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 30,
"total_flos": 65847448043520.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
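
A minimal sketch, assuming this file is saved locally as codes/trainer_state.json and matplotlib is installed, of how the log_history entries above could be read and plotted to inspect the training loss curve. The file name and output path are illustrative, not part of the original upload.

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("codes/trainer_state.json") as f:
    state = json.load(f)

# Every log_history entry above carries "step" and "loss" keys.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

# Plot loss against global step; the run uses a constant learning rate of 5e-5.
plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title(f"Training loss over {state['num_train_epochs']} epochs")
plt.savefig("loss_curve.png")

With logging_steps set to 2 and max_steps of 90, this yields 45 points, showing the loss dropping from about 0.91 at step 2 to about 0.05 by step 90.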