opus-em-deberta-3-large-v2 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9916434540389973,
  "eval_steps": 500,
  "global_step": 537,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 2e-05,
      "loss": 95.476,
      "step": 20
    },
    {
      "epoch": 0.22,
      "learning_rate": 2e-05,
      "loss": 48.4218,
      "step": 40
    },
    {
      "epoch": 0.33,
      "learning_rate": 2e-05,
      "loss": 13.7763,
      "step": 60
    },
    {
      "epoch": 0.45,
      "learning_rate": 2e-05,
      "loss": 1.648,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2e-05,
      "loss": 1.4463,
      "step": 100
    },
    {
      "epoch": 0.67,
      "learning_rate": 2e-05,
      "loss": 0.9103,
      "step": 120
    },
    {
      "epoch": 0.78,
      "learning_rate": 2e-05,
      "loss": 0.6693,
      "step": 140
    },
    {
      "epoch": 0.89,
      "learning_rate": 2e-05,
      "loss": 0.8188,
      "step": 160
    },
    {
      "epoch": 1.0,
      "learning_rate": 2e-05,
      "loss": 0.7446,
      "step": 180
    },
    {
      "epoch": 1.11,
      "learning_rate": 2e-05,
      "loss": 0.6158,
      "step": 200
    },
    {
      "epoch": 1.23,
      "learning_rate": 2e-05,
      "loss": 0.9035,
      "step": 220
    },
    {
      "epoch": 1.34,
      "learning_rate": 2e-05,
      "loss": 0.9486,
      "step": 240
    },
    {
      "epoch": 1.45,
      "learning_rate": 2e-05,
      "loss": 0.7198,
      "step": 260
    },
    {
      "epoch": 1.56,
      "learning_rate": 2e-05,
      "loss": 1.5337,
      "step": 280
    },
    {
      "epoch": 1.67,
      "learning_rate": 2e-05,
      "loss": 1.3312,
      "step": 300
    },
    {
      "epoch": 1.78,
      "learning_rate": 2e-05,
      "loss": 0.8632,
      "step": 320
    },
    {
      "epoch": 1.89,
      "learning_rate": 2e-05,
      "loss": 0.8279,
      "step": 340
    },
    {
      "epoch": 2.01,
      "learning_rate": 2e-05,
      "loss": 0.9823,
      "step": 360
    },
    {
      "epoch": 2.12,
      "learning_rate": 2e-05,
      "loss": 0.7963,
      "step": 380
    },
    {
      "epoch": 2.23,
      "learning_rate": 2e-05,
      "loss": 0.7861,
      "step": 400
    },
    {
      "epoch": 2.34,
      "learning_rate": 2e-05,
      "loss": 0.9859,
      "step": 420
    },
    {
      "epoch": 2.45,
      "learning_rate": 2e-05,
      "loss": 1.0639,
      "step": 440
    },
    {
      "epoch": 2.56,
      "learning_rate": 2e-05,
      "loss": 0.7174,
      "step": 460
    },
    {
      "epoch": 2.67,
      "learning_rate": 2e-05,
      "loss": 0.7996,
      "step": 480
    },
    {
      "epoch": 2.79,
      "learning_rate": 2e-05,
      "loss": 0.7602,
      "step": 500
    },
    {
      "epoch": 2.9,
      "learning_rate": 2e-05,
      "loss": 0.8271,
      "step": 520
    },
    {
      "epoch": 2.99,
      "step": 537,
      "total_flos": 1345715322224640.0,
      "train_loss": 6.7171177713120676,
      "train_runtime": 1535.649,
      "train_samples_per_second": 11.219,
      "train_steps_per_second": 0.35
    }
  ],
  "logging_steps": 20,
  "max_steps": 537,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1345715322224640.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
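
For reference, a minimal sketch (Python standard library only; the local file name "trainer_state.json" is an assumption) of how the log_history recorded above could be loaded and summarized:

import json

# Load the trainer state written by the Hugging Face Trainer.
# Assumes the file has been downloaded to the current directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Print the running training loss at each logging step (logging_steps = 20).
# The final entry has no "loss" key; it holds aggregate statistics instead.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")

# Aggregate statistics from the last entry, e.g. the mean training loss.
final = state["log_history"][-1]
print("train_loss:", final.get("train_loss"))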