{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 32.03103782735209,
"global_step": 4100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.78,
"learning_rate": 1.0000000000000002e-06,
"loss": 10.1861,
"step": 100
},
{
"epoch": 1.56,
"learning_rate": 2.0000000000000003e-06,
"loss": 9.3871,
"step": 200
},
{
"epoch": 2.34,
"learning_rate": 3e-06,
"loss": 8.9208,
"step": 300
},
{
"epoch": 3.12,
"learning_rate": 4.000000000000001e-06,
"loss": 8.5062,
"step": 400
},
{
"epoch": 3.9,
"learning_rate": 5e-06,
"loss": 7.9395,
"step": 500
},
{
"epoch": 4.68,
"learning_rate": 6e-06,
"loss": 7.4515,
"step": 600
},
{
"epoch": 5.47,
"learning_rate": 7.000000000000001e-06,
"loss": 6.9286,
"step": 700
},
{
"epoch": 6.25,
"learning_rate": 8.000000000000001e-06,
"loss": 6.5255,
"step": 800
},
{
"epoch": 7.03,
"learning_rate": 9e-06,
"loss": 6.2869,
"step": 900
},
{
"epoch": 7.81,
"learning_rate": 1e-05,
"loss": 6.1101,
"step": 1000
},
{
"epoch": 8.59,
"learning_rate": 1.1000000000000001e-05,
"loss": 6.0904,
"step": 1100
},
{
"epoch": 9.37,
"learning_rate": 1.2e-05,
"loss": 6.0359,
"step": 1200
},
{
"epoch": 10.16,
"learning_rate": 1.3000000000000001e-05,
"loss": 5.9918,
"step": 1300
},
{
"epoch": 10.93,
"learning_rate": 1.4000000000000001e-05,
"loss": 5.9047,
"step": 1400
},
{
"epoch": 11.71,
"learning_rate": 1.5e-05,
"loss": 5.9245,
"step": 1500
},
{
"epoch": 12.5,
"learning_rate": 1.6000000000000003e-05,
"loss": 5.8934,
"step": 1600
},
{
"epoch": 13.28,
"learning_rate": 1.7000000000000003e-05,
"loss": 5.872,
"step": 1700
},
{
"epoch": 14.06,
"learning_rate": 1.8e-05,
"loss": 5.8497,
"step": 1800
},
{
"epoch": 14.84,
"learning_rate": 1.9e-05,
"loss": 5.7714,
"step": 1900
},
{
"epoch": 15.62,
"learning_rate": 2e-05,
"loss": 5.8107,
"step": 2000
},
{
"epoch": 16.4,
"learning_rate": 2.1e-05,
"loss": 5.7927,
"step": 2100
},
{
"epoch": 17.19,
"learning_rate": 2.2000000000000003e-05,
"loss": 5.7739,
"step": 2200
},
{
"epoch": 17.96,
"learning_rate": 2.3000000000000003e-05,
"loss": 5.7129,
"step": 2300
},
{
"epoch": 18.74,
"learning_rate": 2.4e-05,
"loss": 5.7431,
"step": 2400
},
{
"epoch": 19.53,
"learning_rate": 2.5e-05,
"loss": 5.7336,
"step": 2500
},
{
"epoch": 20.31,
"learning_rate": 2.6000000000000002e-05,
"loss": 5.7209,
"step": 2600
},
{
"epoch": 21.09,
"learning_rate": 2.7000000000000002e-05,
"loss": 5.7094,
"step": 2700
},
{
"epoch": 21.87,
"learning_rate": 2.8000000000000003e-05,
"loss": 5.6441,
"step": 2800
},
{
"epoch": 22.65,
"learning_rate": 2.9e-05,
"loss": 5.6845,
"step": 2900
},
{
"epoch": 23.43,
"learning_rate": 3e-05,
"loss": 5.67,
"step": 3000
},
{
"epoch": 24.22,
"learning_rate": 3.1e-05,
"loss": 5.6608,
"step": 3100
},
{
"epoch": 24.99,
"learning_rate": 3.2000000000000005e-05,
"loss": 5.6013,
"step": 3200
},
{
"epoch": 25.78,
"learning_rate": 3.3e-05,
"loss": 5.6329,
"step": 3300
},
{
"epoch": 26.56,
"learning_rate": 3.4000000000000007e-05,
"loss": 5.6337,
"step": 3400
},
{
"epoch": 27.34,
"learning_rate": 3.5e-05,
"loss": 5.6169,
"step": 3500
},
{
"epoch": 28.12,
"learning_rate": 3.6e-05,
"loss": 5.6096,
"step": 3600
},
{
"epoch": 28.9,
"learning_rate": 3.7e-05,
"loss": 5.5511,
"step": 3700
},
{
"epoch": 29.68,
"learning_rate": 3.8e-05,
"loss": 5.5866,
"step": 3800
},
{
"epoch": 30.47,
"learning_rate": 3.9000000000000006e-05,
"loss": 5.5778,
"step": 3900
},
{
"epoch": 31.25,
"learning_rate": 4e-05,
"loss": 5.5705,
"step": 4000
},
{
"epoch": 32.03,
"learning_rate": 4.1e-05,
"loss": 5.5586,
"step": 4100
}
],
"max_steps": 5120,
"num_train_epochs": 40,
"total_flos": 2.77949076996096e+17,
"trial_name": null,
"trial_params": null
}