Transformers
PyTorch
English
pixel
pretraining
Inference Endpoints
pixel-base / last-checkpoint / trainer_state.json
plip's picture
Training in progress, step 30000
b59273f
raw
history blame
4.89 kB
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4581061890146136,
"global_step": 30000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.9999999999999997e-06,
"loss": 0.8784,
"step": 1000
},
{
"epoch": 0.03,
"learning_rate": 5.999999999999999e-06,
"loss": 0.77,
"step": 2000
},
{
"epoch": 0.05,
"learning_rate": 8.999999999999999e-06,
"loss": 0.7664,
"step": 3000
},
{
"epoch": 0.06,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.7655,
"step": 4000
},
{
"epoch": 0.08,
"learning_rate": 1.4999999999999999e-05,
"loss": 0.765,
"step": 5000
},
{
"epoch": 0.08,
"eval_runtime": 1.3797,
"eval_samples_per_second": 724.791,
"eval_steps_per_second": 11.597,
"step": 5000
},
{
"epoch": 0.09,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.7647,
"step": 6000
},
{
"epoch": 0.11,
"learning_rate": 2.1e-05,
"loss": 0.7644,
"step": 7000
},
{
"epoch": 0.12,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.7638,
"step": 8000
},
{
"epoch": 0.14,
"learning_rate": 2.6999999999999996e-05,
"loss": 0.7633,
"step": 9000
},
{
"epoch": 0.15,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.76,
"step": 10000
},
{
"epoch": 0.15,
"eval_runtime": 1.1376,
"eval_samples_per_second": 879.066,
"eval_steps_per_second": 14.065,
"step": 10000
},
{
"epoch": 0.17,
"learning_rate": 3.2999999999999996e-05,
"loss": 0.7148,
"step": 11000
},
{
"epoch": 0.18,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.6963,
"step": 12000
},
{
"epoch": 0.2,
"learning_rate": 3.9e-05,
"loss": 0.6755,
"step": 13000
},
{
"epoch": 0.21,
"learning_rate": 4.2e-05,
"loss": 0.6516,
"step": 14000
},
{
"epoch": 0.23,
"learning_rate": 4.4999999999999996e-05,
"loss": 0.6412,
"step": 15000
},
{
"epoch": 0.23,
"eval_runtime": 1.1689,
"eval_samples_per_second": 855.472,
"eval_steps_per_second": 13.688,
"step": 15000
},
{
"epoch": 0.24,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.6348,
"step": 16000
},
{
"epoch": 0.26,
"learning_rate": 5.1e-05,
"loss": 0.6295,
"step": 17000
},
{
"epoch": 0.27,
"learning_rate": 5.399999999999999e-05,
"loss": 0.6224,
"step": 18000
},
{
"epoch": 0.29,
"learning_rate": 5.6999999999999996e-05,
"loss": 0.6169,
"step": 19000
},
{
"epoch": 0.31,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.6113,
"step": 20000
},
{
"epoch": 0.31,
"eval_runtime": 1.0179,
"eval_samples_per_second": 982.441,
"eval_steps_per_second": 15.719,
"step": 20000
},
{
"epoch": 0.32,
"learning_rate": 6.299999999999999e-05,
"loss": 0.6074,
"step": 21000
},
{
"epoch": 0.34,
"learning_rate": 6.599999999999999e-05,
"loss": 0.6039,
"step": 22000
},
{
"epoch": 0.35,
"learning_rate": 6.9e-05,
"loss": 0.6005,
"step": 23000
},
{
"epoch": 0.37,
"learning_rate": 7.199999999999999e-05,
"loss": 0.5968,
"step": 24000
},
{
"epoch": 0.38,
"learning_rate": 7.5e-05,
"loss": 0.5932,
"step": 25000
},
{
"epoch": 0.38,
"eval_runtime": 1.1249,
"eval_samples_per_second": 888.989,
"eval_steps_per_second": 14.224,
"step": 25000
},
{
"epoch": 0.4,
"learning_rate": 7.8e-05,
"loss": 0.5912,
"step": 26000
},
{
"epoch": 0.41,
"learning_rate": 8.1e-05,
"loss": 0.58,
"step": 27000
},
{
"epoch": 0.43,
"learning_rate": 8.4e-05,
"loss": 0.5698,
"step": 28000
},
{
"epoch": 0.44,
"learning_rate": 8.699999999999999e-05,
"loss": 0.5639,
"step": 29000
},
{
"epoch": 0.46,
"learning_rate": 8.999999999999999e-05,
"loss": 0.5601,
"step": 30000
},
{
"epoch": 0.46,
"eval_runtime": 1.0096,
"eval_samples_per_second": 990.512,
"eval_steps_per_second": 15.848,
"step": 30000
}
],
"max_steps": 1000000,
"num_train_epochs": 16,
"total_flos": 2.1030078309104144e+21,
"trial_name": null,
"trial_params": null
}