pile_t5_base_docstring / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 22860,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 500
},
{
"epoch": 0.17,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1000
},
{
"epoch": 0.26,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1500
},
{
"epoch": 0.35,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 2000
},
{
"epoch": 0.44,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 2500
},
{
"epoch": 0.52,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 3000
},
{
"epoch": 0.61,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 3500
},
{
"epoch": 0.7,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 4000
},
{
"epoch": 0.79,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 4500
},
{
"epoch": 0.87,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 5000
},
{
"epoch": 0.96,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 5500
},
{
"epoch": 1.05,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 6000
},
{
"epoch": 1.14,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 6500
},
{
"epoch": 1.22,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 7000
},
{
"epoch": 1.31,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 7500
},
{
"epoch": 1.4,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 8000
},
{
"epoch": 1.49,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 8500
},
{
"epoch": 1.57,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 9000
},
{
"epoch": 1.66,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 9500
},
{
"epoch": 1.75,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 10000
},
{
"epoch": 1.84,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 10500
},
{
"epoch": 1.92,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 11000
},
{
"epoch": 2.01,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 11500
},
{
"epoch": 2.1,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 12000
},
{
"epoch": 2.19,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 12500
},
{
"epoch": 2.27,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 13000
},
{
"epoch": 2.36,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 13500
},
{
"epoch": 2.45,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 14000
},
{
"epoch": 2.54,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 14500
},
{
"epoch": 2.62,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 15000
},
{
"epoch": 2.71,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 15500
},
{
"epoch": 2.8,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 16000
},
{
"epoch": 2.89,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 16500
},
{
"epoch": 2.97,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 17000
},
{
"epoch": 3.06,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 17500
},
{
"epoch": 3.15,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 18000
},
{
"epoch": 3.24,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 18500
},
{
"epoch": 3.32,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 19000
},
{
"epoch": 3.41,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 19500
},
{
"epoch": 3.5,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 20000
},
{
"epoch": 3.59,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 20500
},
{
"epoch": 3.67,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 21000
},
{
"epoch": 3.76,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 21500
},
{
"epoch": 3.85,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 22000
},
{
"epoch": 3.94,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 22500
},
{
"epoch": 4.0,
"step": 22860,
"total_flos": 2.1912235278336e+17,
"train_loss": 0.0,
"train_runtime": 7438.5423,
"train_samples_per_second": 21.51,
"train_steps_per_second": 3.073
}
],
"logging_steps": 500,
"max_steps": 22860,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 4000,
"total_flos": 2.1912235278336e+17,
"train_batch_size": 7,
"trial_name": null,
"trial_params": null
}
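
A minimal sketch for reading this state back, assuming the file is saved locally as trainer_state.json (the path and variable names here are illustrative): it parses the JSON with Python's standard json module and walks the log_history entries, which the Trainer writes every logging_steps (500) optimizer steps.

import json

# Load the trainer state dumped by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry is one logging event; the final entry holds the
# aggregate training summary (train_loss, train_runtime, ...).
for entry in state["log_history"]:
    step = entry.get("step")
    loss = entry.get("loss", entry.get("train_loss"))
    grad_norm = entry.get("grad_norm")  # NaN in the file parses to float('nan')
    print(f"step={step} loss={loss} grad_norm={grad_norm}")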