{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.041666666666666664,
"grad_norm": 6.875,
"learning_rate": 4.000000000000001e-06,
"loss": 3.886,
"step": 1
},
{
"epoch": 0.20833333333333334,
"grad_norm": 5.15625,
"learning_rate": 2e-05,
"loss": 3.5195,
"step": 5
},
{
"epoch": 0.4166666666666667,
"grad_norm": 3.546875,
"learning_rate": 1.9340161087325483e-05,
"loss": 3.6848,
"step": 10
},
{
"epoch": 0.625,
"grad_norm": 3.0625,
"learning_rate": 1.744772182743782e-05,
"loss": 3.5055,
"step": 15
},
{
"epoch": 0.8333333333333334,
"grad_norm": 3.0,
"learning_rate": 1.4572423233046386e-05,
"loss": 3.4931,
"step": 20
},
{
"epoch": 1.0,
"eval_loss": 5.596227169036865,
"eval_runtime": 0.0423,
"eval_samples_per_second": 283.93,
"eval_steps_per_second": 47.322,
"step": 24
},
{
"epoch": 1.0416666666666667,
"grad_norm": 2.90625,
"learning_rate": 1.1093712083778748e-05,
"loss": 3.4722,
"step": 25
},
{
"epoch": 1.25,
"grad_norm": 2.828125,
"learning_rate": 7.470666176083193e-06,
"loss": 3.4108,
"step": 30
},
{
"epoch": 1.4583333333333333,
"grad_norm": 2.5,
"learning_rate": 4.181410844420473e-06,
"loss": 3.4187,
"step": 35
},
{
"epoch": 1.6666666666666665,
"grad_norm": 2.8125,
"learning_rate": 1.660021821101222e-06,
"loss": 3.4852,
"step": 40
},
{
"epoch": 1.875,
"grad_norm": 2.484375,
"learning_rate": 2.392412244407294e-07,
"loss": 3.4278,
"step": 45
},
{
"epoch": 2.0,
"eval_loss": 5.484647274017334,
"eval_runtime": 0.0423,
"eval_samples_per_second": 283.999,
"eval_steps_per_second": 47.333,
"step": 48
},
{
"epoch": 2.0,
"step": 48,
"total_flos": 394600362541056.0,
"train_loss": 3.4869212259848914,
"train_runtime": 14.4504,
"train_samples_per_second": 102.281,
"train_steps_per_second": 3.322
}
],
"logging_steps": 5,
"max_steps": 48,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000000000,
"total_flos": 394600362541056.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}