{
  "best_metric": 0.7430530679085001,
  "best_model_checkpoint": "bertbase-uncased-2-actual/checkpoint-20000",
  "epoch": 1.0,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 9.751e-06,
      "loss": 0.6643,
      "step": 500
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.501000000000001e-06,
      "loss": 0.6215,
      "step": 1000
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.251000000000002e-06,
      "loss": 0.6165,
      "step": 1500
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.0015e-06,
      "loss": 0.6052,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.7515e-06,
      "loss": 0.5804,
      "step": 2500
    },
    {
      "epoch": 0.15,
      "learning_rate": 8.5015e-06,
      "loss": 0.5753,
      "step": 3000
    },
    {
      "epoch": 0.17,
      "learning_rate": 8.2515e-06,
      "loss": 0.601,
      "step": 3500
    },
    {
      "epoch": 0.2,
      "learning_rate": 8.001500000000001e-06,
      "loss": 0.5831,
      "step": 4000
    },
    {
      "epoch": 0.23,
      "learning_rate": 7.7515e-06,
      "loss": 0.5854,
      "step": 4500
    },
    {
      "epoch": 0.25,
      "learning_rate": 7.502e-06,
      "loss": 0.5882,
      "step": 5000
    },
    {
      "epoch": 0.28,
      "learning_rate": 7.252e-06,
      "loss": 0.5659,
      "step": 5500
    },
    {
      "epoch": 0.3,
      "learning_rate": 7.002000000000001e-06,
      "loss": 0.564,
      "step": 6000
    },
    {
      "epoch": 0.33,
      "learning_rate": 6.752000000000001e-06,
      "loss": 0.5692,
      "step": 6500
    },
    {
      "epoch": 0.35,
      "learning_rate": 6.502000000000001e-06,
      "loss": 0.5755,
      "step": 7000
    },
    {
      "epoch": 0.38,
      "learning_rate": 6.2520000000000004e-06,
      "loss": 0.5547,
      "step": 7500
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.002e-06,
      "loss": 0.5676,
      "step": 8000
    },
    {
      "epoch": 0.42,
      "learning_rate": 5.752500000000001e-06,
      "loss": 0.5788,
      "step": 8500
    },
    {
      "epoch": 0.45,
      "learning_rate": 5.5025e-06,
      "loss": 0.5486,
      "step": 9000
    },
    {
      "epoch": 0.47,
      "learning_rate": 5.2525e-06,
      "loss": 0.5601,
      "step": 9500
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.0025e-06,
      "loss": 0.5331,
      "step": 10000
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.752500000000001e-06,
      "loss": 0.5659,
      "step": 10500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.5025000000000005e-06,
      "loss": 0.5652,
      "step": 11000
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.2525e-06,
      "loss": 0.5529,
      "step": 11500
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.0025e-06,
      "loss": 0.5531,
      "step": 12000
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.7530000000000003e-06,
      "loss": 0.5556,
      "step": 12500
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.503e-06,
      "loss": 0.5323,
      "step": 13000
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.253e-06,
      "loss": 0.5809,
      "step": 13500
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.0030000000000003e-06,
      "loss": 0.5367,
      "step": 14000
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.753e-06,
      "loss": 0.5706,
      "step": 14500
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.5040000000000005e-06,
      "loss": 0.5386,
      "step": 15000
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.254e-06,
      "loss": 0.552,
      "step": 15500
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.004e-06,
      "loss": 0.5468,
      "step": 16000
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.754e-06,
      "loss": 0.5546,
      "step": 16500
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.5045000000000002e-06,
      "loss": 0.5495,
      "step": 17000
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.2545000000000002e-06,
      "loss": 0.5439,
      "step": 17500
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.0045e-06,
      "loss": 0.562,
      "step": 18000
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.545000000000001e-07,
      "loss": 0.5589,
      "step": 18500
    },
    {
      "epoch": 0.95,
      "learning_rate": 5.045000000000001e-07,
      "loss": 0.5585,
      "step": 19000
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.545e-07,
      "loss": 0.5288,
      "step": 19500
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.500000000000001e-09,
      "loss": 0.5205,
      "step": 20000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.74895,
      "eval_f1": 0.7430530679085001,
      "eval_loss": 0.5389962196350098,
      "eval_runtime": 136.8617,
      "eval_samples_per_second": 146.133,
      "eval_steps_per_second": 36.533,
      "step": 20000
    }
  ],
  "max_steps": 20000,
  "num_train_epochs": 1,
  "total_flos": 6597569324250480.0,
  "trial_name": null,
  "trial_params": null
}