{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"global_step": 62500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"learning_rate": 1.9333333333333333e-05,
"loss": 5.4183,
"step": 6250
},
{
"epoch": 2.0,
"learning_rate": 1.866688e-05,
"loss": 4.1174,
"step": 12500
},
{
"epoch": 3.0,
"learning_rate": 1.8000533333333335e-05,
"loss": 3.5049,
"step": 18750
},
{
"epoch": 4.0,
"learning_rate": 1.7334080000000003e-05,
"loss": 3.0671,
"step": 25000
},
{
"epoch": 5.0,
"learning_rate": 1.6667626666666667e-05,
"loss": 2.7197,
"step": 31250
},
{
"epoch": 6.0,
"learning_rate": 1.600128e-05,
"loss": 2.4474,
"step": 37500
},
{
"epoch": 7.0,
"learning_rate": 1.533482666666667e-05,
"loss": 2.2439,
"step": 43750
},
{
"epoch": 8.0,
"learning_rate": 1.4668373333333334e-05,
"loss": 2.091,
"step": 50000
},
{
"epoch": 9.0,
"learning_rate": 1.4001920000000002e-05,
"loss": 1.9731,
"step": 56250
},
{
"epoch": 10.0,
"learning_rate": 1.3335360000000002e-05,
"loss": 1.8787,
"step": 62500
}
],
"max_steps": 187500,
"num_train_epochs": 30,
"total_flos": 1.1680685027544269e+17,
"trial_name": null,
"trial_params": null
}