gpt2-cpt-dutch / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 82,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012195121951219513,
      "grad_norm": 10.0,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 4.6342,
      "step": 1
    },
    {
      "epoch": 0.06097560975609756,
      "grad_norm": 8.5,
      "learning_rate": 0.00011111111111111112,
      "loss": 4.6731,
      "step": 5
    },
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 3.34375,
      "learning_rate": 0.00019990741151022301,
      "loss": 4.3963,
      "step": 10
    },
    {
      "epoch": 0.18292682926829268,
      "grad_norm": 2.890625,
      "learning_rate": 0.00019668478136052774,
      "loss": 4.2375,
      "step": 15
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 2.28125,
      "learning_rate": 0.00018900275764346768,
      "loss": 4.1537,
      "step": 20
    },
    {
      "epoch": 0.3048780487804878,
      "grad_norm": 2.4375,
      "learning_rate": 0.00017721565844991643,
      "loss": 4.1163,
      "step": 25
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 2.328125,
      "learning_rate": 0.00016186714032625035,
      "loss": 4.0943,
      "step": 30
    },
    {
      "epoch": 0.4268292682926829,
      "grad_norm": 1.9375,
      "learning_rate": 0.0001436651231956064,
      "loss": 4.0725,
      "step": 35
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 1.8515625,
      "learning_rate": 0.00012344913895704097,
      "loss": 4.0395,
      "step": 40
    },
    {
      "epoch": 0.5487804878048781,
      "grad_norm": 1.6328125,
      "learning_rate": 0.00010215160974362223,
      "loss": 4.0439,
      "step": 45
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 1.625,
      "learning_rate": 8.075484180291701e-05,
      "loss": 4.0223,
      "step": 50
    },
    {
      "epoch": 0.6707317073170732,
      "grad_norm": 1.7265625,
      "learning_rate": 6.024571857174443e-05,
      "loss": 4.0318,
      "step": 55
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 1.78125,
      "learning_rate": 4.1570182637163155e-05,
      "loss": 4.0052,
      "step": 60
    },
    {
      "epoch": 0.7926829268292683,
      "grad_norm": 1.7421875,
      "learning_rate": 2.5589606012863963e-05,
      "loss": 4.0278,
      "step": 65
    },
    {
      "epoch": 0.8536585365853658,
      "grad_norm": 1.7734375,
      "learning_rate": 1.30410610653389e-05,
      "loss": 4.019,
      "step": 70
    },
    {
      "epoch": 0.9146341463414634,
      "grad_norm": 1.609375,
      "learning_rate": 4.503324514474483e-06,
      "loss": 4.0281,
      "step": 75
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 1.609375,
      "learning_rate": 3.701825065392184e-07,
      "loss": 4.0339,
      "step": 80
    },
    {
      "epoch": 1.0,
      "step": 82,
      "total_flos": 684585123840000.0,
      "train_loss": 4.120773559663354,
      "train_runtime": 106.8255,
      "train_samples_per_second": 12.263,
      "train_steps_per_second": 0.768
    }
  ],
  "logging_steps": 5,
  "max_steps": 82,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 684585123840000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}