{}
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 1.6778770685195923,
"learning_rate": 1.267605633802817e-05,
"loss": 2.0809,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 2.0533597469329834,
"eval_runtime": 65.0241,
"eval_samples_per_second": 3.46,
"eval_steps_per_second": 0.877,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 2.9120090007781982,
"learning_rate": 2.535211267605634e-05,
"loss": 1.8984,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 1.723413109779358,
"eval_runtime": 65.0617,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.7429661750793457,
"learning_rate": 3.802816901408451e-05,
"loss": 1.6513,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 1.6305742263793945,
"eval_runtime": 65.0654,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 2.668692111968994,
"learning_rate": 4.992175273865415e-05,
"loss": 1.6082,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 1.5966345071792603,
"eval_runtime": 65.0716,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.2179903984069824,
"learning_rate": 4.85133020344288e-05,
"loss": 1.5274,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 1.5803054571151733,
"eval_runtime": 65.0649,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 2.964496374130249,
"learning_rate": 4.710485133020345e-05,
"loss": 1.5474,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 1.5681735277175903,
"eval_runtime": 65.0649,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 2.6218090057373047,
"learning_rate": 4.569640062597809e-05,
"loss": 1.55,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 1.5585384368896484,
"eval_runtime": 65.0599,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 2.9666621685028076,
"learning_rate": 4.428794992175274e-05,
"loss": 1.5002,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 1.5494067668914795,
"eval_runtime": 65.0722,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.445936918258667,
"learning_rate": 4.287949921752739e-05,
"loss": 1.4529,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 1.553564429283142,
"eval_runtime": 65.0624,
"eval_samples_per_second": 3.458,
"eval_steps_per_second": 0.876,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 3.49705171585083,
"learning_rate": 4.1471048513302035e-05,
"loss": 1.4639,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 1.5489095449447632,
"eval_runtime": 65.0549,
"eval_samples_per_second": 3.459,
"eval_steps_per_second": 0.876,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 2.301144599914551,
"learning_rate": 4.0062597809076686e-05,
"loss": 1.463,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 1.5437979698181152,
"eval_runtime": 65.0526,
"eval_samples_per_second": 3.459,
"eval_steps_per_second": 0.876,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 5.290146072576e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}