german_gpt_4g / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9382957884427032,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 5e-05,
      "loss": 4.8251,
      "step": 500
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.787270251872022e-05,
      "loss": 4.4562,
      "step": 1000
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.574540503744044e-05,
      "loss": 4.3579,
      "step": 1500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.3618107556160655e-05,
      "loss": 4.2909,
      "step": 2000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.149081007488088e-05,
      "loss": 4.2459,
      "step": 2500
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.936351259360109e-05,
      "loss": 4.2102,
      "step": 3000
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.723621511232131e-05,
      "loss": 4.1895,
      "step": 3500
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.510891763104153e-05,
      "loss": 4.1604,
      "step": 4000
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.2981620149761745e-05,
      "loss": 4.044,
      "step": 4500
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.085432266848196e-05,
      "loss": 4.0203,
      "step": 5000
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.872702518720218e-05,
      "loss": 4.0172,
      "step": 5500
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.6599727705922394e-05,
      "loss": 4.0025,
      "step": 6000
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.4472430224642616e-05,
      "loss": 3.9855,
      "step": 6500
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.234513274336283e-05,
      "loss": 3.9791,
      "step": 7000
    },
    {
      "epoch": 1.84,
      "learning_rate": 2.021783526208305e-05,
      "loss": 3.9777,
      "step": 7500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.809053778080327e-05,
      "loss": 3.9665,
      "step": 8000
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.5963240299523484e-05,
      "loss": 3.903,
      "step": 8500
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.3835942818243706e-05,
      "loss": 3.8656,
      "step": 9000
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.1708645336963923e-05,
      "loss": 3.8674,
      "step": 9500
    },
    {
      "epoch": 2.45,
      "learning_rate": 9.58134785568414e-06,
      "loss": 3.8754,
      "step": 10000
    },
    {
      "epoch": 2.57,
      "learning_rate": 7.4540503744043575e-06,
      "loss": 3.8795,
      "step": 10500
    },
    {
      "epoch": 2.69,
      "learning_rate": 5.326752893124575e-06,
      "loss": 3.8656,
      "step": 11000
    },
    {
      "epoch": 2.82,
      "learning_rate": 3.1994554118447927e-06,
      "loss": 3.8711,
      "step": 11500
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.0721579305650103e-06,
      "loss": 3.8594,
      "step": 12000
    }
  ],
  "max_steps": 12252,
  "num_train_epochs": 3,
  "total_flos": 37091057394253824,
  "trial_name": null,
  "trial_params": null
}
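
A minimal sketch (not part of the original file) for inspecting this log: it loads trainer_state.json, prints the loss curve, and compares each logged learning rate against a linear schedule with warmup, which the recorded values appear consistent with (peak 5e-05 at step 500, decaying toward 0 at max_steps 12252). The local file path and the warmup_steps value are assumptions, not taken from the file itself.

import json

# Assumed path: adjust to wherever this checkpoint's trainer_state.json lives.
with open("trainer_state.json") as f:
    state = json.load(f)

max_steps = state["max_steps"]   # 12252 in this file
warmup_steps = 500               # assumption, inferred from the peak lr logged at step 500
peak_lr = 5e-05                  # learning rate logged at step 500

for entry in state["log_history"]:
    step = entry["step"]
    # Linear decay after warmup: lr reaches 0 at max_steps.
    expected_lr = peak_lr * (max_steps - step) / (max_steps - warmup_steps)
    print(
        f"step {step:5d}  epoch {entry['epoch']:.2f}  "
        f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}  "
        f"(linear-with-warmup estimate ~{expected_lr:.3e})"
    )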