gpt2-sft-lima / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "grad_norm": 5.802258301213437,
      "learning_rate": 1e-05,
      "loss": 3.714,
      "step": 1
    },
    {
      "epoch": 0.83,
      "grad_norm": 4.228032304421419,
      "learning_rate": 1.5877852522924733e-05,
      "loss": 3.6492,
      "step": 5
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.712366104125977,
      "eval_runtime": 0.0483,
      "eval_samples_per_second": 248.688,
      "eval_steps_per_second": 20.724,
      "step": 6
    },
    {
      "epoch": 1.67,
      "grad_norm": 2.84117612385776,
      "learning_rate": 1.9098300562505266e-06,
      "loss": 3.457,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_loss": 5.508264064788818,
      "eval_runtime": 0.0486,
      "eval_samples_per_second": 247.068,
      "eval_steps_per_second": 20.589,
      "step": 12
    },
    {
      "epoch": 2.0,
      "step": 12,
      "total_flos": 36238786560.0,
      "train_loss": 3.5401106079419455,
      "train_runtime": 10.4273,
      "train_samples_per_second": 141.744,
      "train_steps_per_second": 1.151
    }
  ],
  "logging_steps": 5,
  "max_steps": 12,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000000000,
  "total_flos": 36238786560.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
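
The JSON above is the state file the Hugging Face Trainer writes during this run; its "log_history" list interleaves training entries (carrying "loss") and evaluation entries (carrying "eval_loss"). Below is a minimal sketch of how one might read such a file and print the loss curve; the file path "trainer_state.json" and the output formatting are assumptions for illustration, not part of this repository.

# Minimal sketch: load a Trainer state file and print train/eval losses per step.
import json

# Assumed path; point this at the downloaded trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# "log_history" mixes training logs ("loss") and evaluation logs ("eval_loss");
# the final summary entry uses "train_loss" and is skipped here.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:>3}  epoch {entry["epoch"]:.2f}  train loss {entry["loss"]:.4f}')
    elif "eval_loss" in entry:
        print(f'step {entry["step"]:>3}  epoch {entry["epoch"]:.2f}  eval loss  {entry["eval_loss"]:.4f}')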