{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.7906976744186047,
"eval_steps": 500,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": 9363.349507841189,
"learning_rate": 6.666666666666667e-06,
"loss": 30.9126,
"step": 1
},
{
"epoch": 0.47,
"grad_norm": 556.2728379648993,
"learning_rate": 1.973044870579824e-05,
"loss": 21.2307,
"step": 5
},
{
"epoch": 0.93,
"grad_norm": 559.6918657048097,
"learning_rate": 1.686241637868734e-05,
"loss": 25.9559,
"step": 10
},
{
"epoch": 1.4,
"grad_norm": 375.2686916285916,
"learning_rate": 1.1736481776669307e-05,
"loss": 18.4454,
"step": 15
},
{
"epoch": 1.86,
"grad_norm": 170.37327814160207,
"learning_rate": 6.039202339608432e-06,
"loss": 16.4221,
"step": 20
},
{
"epoch": 2.33,
"grad_norm": 161.47537049884457,
"learning_rate": 1.6451218858706374e-06,
"loss": 15.3885,
"step": 25
},
{
"epoch": 2.79,
"grad_norm": 138.06585339598334,
"learning_rate": 0.0,
"loss": 14.5343,
"step": 30
},
{
"epoch": 2.79,
"step": 30,
"total_flos": 8193589641216.0,
"train_loss": 18.98553034464518,
"train_runtime": 179.3227,
"train_samples_per_second": 22.819,
"train_steps_per_second": 0.167
}
],
"logging_steps": 5,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 8193589641216.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}