llama2_13b_lora_adapter / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9985772550507446,
"eval_steps": 500,
"global_step": 658,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"grad_norm": 1.3818485736846924,
"learning_rate": 1.984705192213004e-05,
"loss": 1.5534,
"step": 100
},
{
"epoch": 0.3,
"grad_norm": 1.1903014183044434,
"learning_rate": 1.761094828138534e-05,
"loss": 0.8705,
"step": 200
},
{
"epoch": 0.46,
"grad_norm": 1.446919322013855,
"learning_rate": 1.3281315178199537e-05,
"loss": 0.8352,
"step": 300
},
{
"epoch": 0.61,
"grad_norm": 1.4014055728912354,
"learning_rate": 8.04909677983872e-06,
"loss": 0.8368,
"step": 400
},
{
"epoch": 0.76,
"grad_norm": 1.499550461769104,
"learning_rate": 3.3535097208036584e-06,
"loss": 0.8154,
"step": 500
},
{
"epoch": 0.91,
"grad_norm": 1.5235815048217773,
"learning_rate": 4.861604459298697e-07,
"loss": 0.8083,
"step": 600
},
{
"epoch": 1.0,
"step": 658,
"total_flos": 4.878623526912e+16,
"train_loss": 0.9400251382752393,
"train_runtime": 7654.5047,
"train_samples_per_second": 1.377,
"train_steps_per_second": 0.086
}
],
"logging_steps": 100,
"max_steps": 658,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 4.878623526912e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
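
The state above records a single-epoch LoRA fine-tuning run (658 steps, logging every 100 steps); the learning-rate values falling from ~1.98e-5 to ~4.9e-7 are consistent with a decaying schedule, and the last log_history entry holds run-level summary stats rather than a per-step loss. A minimal sketch for reading the loss curve out of this file, assuming it is saved locally as trainer_state.json (the path is an assumption, not part of the repo layout):

import json

# Load the state file written by the Hugging Face Trainer.
# NOTE: path is an assumption; point it at your local copy.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry loss / learning_rate / grad_norm; the final
# entry carries summary stats instead, so filter on the "loss" key.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  "
              f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")

summary = state["log_history"][-1]
print(f"final train_loss: {summary['train_loss']:.4f} "
      f"over {state['global_step']} steps")

Run against this file, the loop would print the six logged steps (loss dropping from 1.5534 to 0.8083) followed by the averaged train_loss of 0.9400 over 658 steps.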