llama2-7b-wo-medication_qa-sft / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.769230769230769,
"eval_steps": 500,
"global_step": 18,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"grad_norm": 1.559802270540089,
"learning_rate": 1e-05,
"loss": 1.1179,
"step": 1
},
{
"epoch": 0.77,
"grad_norm": 2.222573814581267,
"learning_rate": 1.8314696123025456e-05,
"loss": 1.1052,
"step": 5
},
{
"epoch": 0.92,
"eval_loss": 1.2975958585739136,
"eval_runtime": 4.0078,
"eval_samples_per_second": 11.478,
"eval_steps_per_second": 0.749,
"step": 6
},
{
"epoch": 1.54,
"grad_norm": 0.8420885861640808,
"learning_rate": 1e-05,
"loss": 0.9691,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 1.245834469795227,
"eval_runtime": 4.0931,
"eval_samples_per_second": 11.238,
"eval_steps_per_second": 0.733,
"step": 13
},
{
"epoch": 2.31,
"grad_norm": 0.6870008084653193,
"learning_rate": 1.6853038769745466e-06,
"loss": 0.871,
"step": 15
},
{
"epoch": 2.77,
"eval_loss": 1.233281135559082,
"eval_runtime": 4.1037,
"eval_samples_per_second": 11.21,
"eval_steps_per_second": 0.731,
"step": 18
},
{
"epoch": 2.77,
"step": 18,
"total_flos": 3716488888320.0,
"train_loss": 0.9516304201549954,
"train_runtime": 305.4482,
"train_samples_per_second": 3.978,
"train_steps_per_second": 0.059
}
],
"logging_steps": 5,
"max_steps": 18,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 3716488888320.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
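
For reference, a minimal sketch of how the log_history entries above could be inspected, assuming the file is saved locally as trainer_state.json (the path and variable names here are illustrative, not part of the repository):

import json

# Load the state file written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>2}  epoch {e['epoch']:.2f}  train loss {e['loss']:.4f}")
for e in eval_logs:
    print(f"step {e['step']:>2}  epoch {e['epoch']:.2f}  eval loss  {e['eval_loss']:.4f}")

Run on this file, the eval lines would show the eval_loss decreasing from about 1.298 at step 6 to about 1.233 at step 18.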