llama2-7b-wo-live_qa-sft / trainer_state.json
Commit 59c6ef6 (verified) by Minbyul: "Model save"
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 21,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 1.3965061424468534,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.1352,
      "step": 1
    },
    {
      "epoch": 0.71,
      "grad_norm": 1.68859600319114,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 1.1023,
      "step": 5
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.3855385780334473,
      "eval_runtime": 1.3434,
      "eval_samples_per_second": 5.955,
      "eval_steps_per_second": 0.744,
      "step": 7
    },
    {
      "epoch": 1.43,
      "grad_norm": 0.9181756094384841,
      "learning_rate": 1.342020143325669e-05,
      "loss": 0.983,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.40538489818573,
      "eval_runtime": 1.3773,
      "eval_samples_per_second": 5.809,
      "eval_steps_per_second": 0.726,
      "step": 14
    },
    {
      "epoch": 2.14,
      "grad_norm": 0.7917274913498488,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.8697,
      "step": 15
    },
    {
      "epoch": 2.86,
      "grad_norm": 0.6206861453282275,
      "learning_rate": 1.519224698779198e-07,
      "loss": 0.7795,
      "step": 20
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.411104440689087,
      "eval_runtime": 1.3903,
      "eval_samples_per_second": 5.754,
      "eval_steps_per_second": 0.719,
      "step": 21
    },
    {
      "epoch": 3.0,
      "step": 21,
      "total_flos": 4344627855360.0,
      "train_loss": 0.9259367443266369,
      "train_runtime": 345.6861,
      "train_samples_per_second": 3.775,
      "train_steps_per_second": 0.061
    }
  ],
  "logging_steps": 5,
  "max_steps": 21,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4344627855360.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
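
For reference, a minimal sketch of how the log_history above can be split into training and evaluation records with only the standard library. The path "trainer_state.json" is an assumption (adjust it to wherever the file sits in your checkout); the field names are taken directly from the JSON shown here.

import json

# Load the state file saved by the Hugging Face Trainer
# (path is an assumption; point it at this repo's trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes three record types: training logs (have "loss"),
# per-epoch evaluations (have "eval_loss"), and a final summary (has "train_runtime").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f'step {e["step"]:>2}  epoch {e["epoch"]:.2f}  loss {e["loss"]:.4f}  lr {e["learning_rate"]:.2e}')

for e in eval_logs:
    print(f'eval @ epoch {e["epoch"]:.1f} (step {e["step"]})  eval_loss {e["eval_loss"]:.4f}')

Run against this file, the loop over eval_logs prints the three per-epoch eval losses (1.3855, 1.4054, 1.4111), which is a quick way to spot that validation loss drifts upward while training loss falls.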