{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.504424778761062,
"eval_steps": 500,
"global_step": 170,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08849557522123894,
"grad_norm": 0.31797459721565247,
"learning_rate": 9.91645696388268e-05,
"loss": 0.8613,
"step": 10
},
{
"epoch": 0.17699115044247787,
"grad_norm": 0.7891760468482971,
"learning_rate": 9.665301848904975e-05,
"loss": 0.7427,
"step": 20
},
{
"epoch": 0.26548672566371684,
"grad_norm": 1.1392697095870972,
"learning_rate": 9.255088935784784e-05,
"loss": 0.7273,
"step": 30
},
{
"epoch": 0.35398230088495575,
"grad_norm": 0.6338401436805725,
"learning_rate": 8.699803935381485e-05,
"loss": 0.6516,
"step": 40
},
{
"epoch": 0.4424778761061947,
"grad_norm": 1.0638246536254883,
"learning_rate": 8.018378615106108e-05,
"loss": 0.6676,
"step": 50
},
{
"epoch": 0.5309734513274337,
"grad_norm": 1.0774465799331665,
"learning_rate": 7.23404534331376e-05,
"loss": 0.6645,
"step": 60
},
{
"epoch": 0.6194690265486725,
"grad_norm": 0.48238104581832886,
"learning_rate": 6.373545009932168e-05,
"loss": 0.6528,
"step": 70
},
{
"epoch": 0.7079646017699115,
"grad_norm": 0.50417160987854,
"learning_rate": 5.466215328310079e-05,
"loss": 0.6333,
"step": 80
},
{
"epoch": 0.7964601769911505,
"grad_norm": 1.2405716180801392,
"learning_rate": 4.542990601526297e-05,
"loss": 0.6844,
"step": 90
},
{
"epoch": 0.8849557522123894,
"grad_norm": 0.8654212951660156,
"learning_rate": 3.635347054911746e-05,
"loss": 0.5696,
"step": 100
},
{
"epoch": 0.9734513274336283,
"grad_norm": 0.8581073880195618,
"learning_rate": 2.774229692390805e-05,
"loss": 0.5922,
"step": 110
},
{
"epoch": 1.0619469026548674,
"grad_norm": 0.6241644024848938,
"learning_rate": 1.9889972641710248e-05,
"loss": 0.5009,
"step": 120
},
{
"epoch": 1.1504424778761062,
"grad_norm": 0.6132310032844543,
"learning_rate": 1.3064213158260386e-05,
"loss": 0.5355,
"step": 130
},
{
"epoch": 1.238938053097345,
"grad_norm": 0.7218645215034485,
"learning_rate": 7.497734449769639e-06,
"loss": 0.5312,
"step": 140
},
{
"epoch": 1.3274336283185841,
"grad_norm": 0.5715582370758057,
"learning_rate": 3.380318844467728e-06,
"loss": 0.5281,
"step": 150
},
{
"epoch": 1.415929203539823,
"grad_norm": 0.8143259882926941,
"learning_rate": 8.523446247096445e-07,
"loss": 0.5436,
"step": 160
},
{
"epoch": 1.504424778761062,
"grad_norm": 0.8244267106056213,
"learning_rate": 0.0,
"loss": 0.4617,
"step": 170
},
{
"epoch": 1.504424778761062,
"step": 170,
"total_flos": 3.3074842059669504e+16,
"train_loss": 0.6204830337973202,
"train_runtime": 372.3105,
"train_samples_per_second": 3.626,
"train_steps_per_second": 0.457
}
],
"logging_steps": 10,
"max_steps": 170,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 3.3074842059669504e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}