zephyr-7b-gemma-sft / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.571428571428571,
  "eval_steps": 500,
  "global_step": 9,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 16229.494205476607,
      "learning_rate": 2e-05,
      "loss": 63.281,
      "step": 1
    },
    {
      "epoch": 0.8571428571428571,
      "eval_loss": 31.527084350585938,
      "eval_runtime": 0.8623,
      "eval_samples_per_second": 24.353,
      "eval_steps_per_second": 1.16,
      "step": 3
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 20846.304790427996,
      "learning_rate": 1e-05,
      "loss": 53.5176,
      "step": 5
    },
    {
      "epoch": 2.0,
      "eval_loss": 24.949296951293945,
      "eval_runtime": 0.8352,
      "eval_samples_per_second": 25.143,
      "eval_steps_per_second": 1.197,
      "step": 7
    },
    {
      "epoch": 2.571428571428571,
      "eval_loss": 24.174694061279297,
      "eval_runtime": 0.8231,
      "eval_samples_per_second": 25.515,
      "eval_steps_per_second": 1.215,
      "step": 9
    },
    {
      "epoch": 2.571428571428571,
      "step": 9,
      "total_flos": 2409879306240.0,
      "train_loss": 42.482702043321396,
      "train_runtime": 189.679,
      "train_samples_per_second": 6.643,
      "train_steps_per_second": 0.047
    }
  ],
  "logging_steps": 5,
  "max_steps": 9,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2409879306240.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
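
The log_history array above interleaves training-log entries (loss, grad_norm, learning_rate) with evaluation entries (eval_loss plus throughput), each keyed by step and epoch; here eval_loss drops from 31.53 at step 3 to 24.17 at step 9. A minimal sketch for reading that evaluation curve back out of the file, assuming it has been downloaded locally as trainer_state.json:

import json

# Load the saved Trainer state shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Print eval_loss for every evaluation entry in log_history.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']} (epoch {entry['epoch']:.2f}): "
              f"eval_loss = {entry['eval_loss']}")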