{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.20873786407767,
"eval_steps": 500,
"global_step": 344,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"learning_rate": 2.4848484848484847e-05,
"loss": 3.4366,
"step": 41
},
{
"epoch": 1.19,
"learning_rate": 4.9696969696969694e-05,
"loss": 1.3573,
"step": 82
},
{
"epoch": 2.18,
"learning_rate": 7.454545454545455e-05,
"loss": 0.3602,
"step": 123
},
{
"epoch": 3.17,
"learning_rate": 9.939393939393939e-05,
"loss": 0.1629,
"step": 164
},
{
"epoch": 4.16,
"learning_rate": 9.730276466621713e-05,
"loss": 0.1326,
"step": 205
},
{
"epoch": 5.15,
"learning_rate": 9.453809844908968e-05,
"loss": 0.1187,
"step": 246
},
{
"epoch": 6.14,
"learning_rate": 9.177343223196225e-05,
"loss": 0.1073,
"step": 287
},
{
"epoch": 7.13,
"learning_rate": 8.90087660148348e-05,
"loss": 0.0997,
"step": 328
}
],
"logging_steps": 41,
"max_steps": 1648,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"total_flos": 8.094040889229312e+16,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}