{
  "best_metric": 1.0987577438354492,
  "best_model_checkpoint": "hBERTv2_data_aug_mnli/checkpoint-314400",
  "epoch": 15.0,
  "global_step": 471600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 4.900038167938932e-05,
      "loss": 1.0988,
      "step": 31440
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.09883713722229,
      "eval_runtime": 11.411,
      "eval_samples_per_second": 860.133,
      "eval_steps_per_second": 3.418,
      "step": 31440
    },
    {
      "epoch": 2.0,
      "learning_rate": 4.8000731552162856e-05,
      "loss": 1.0985,
      "step": 62880
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0991657972335815,
      "eval_runtime": 11.4006,
      "eval_samples_per_second": 860.917,
      "eval_steps_per_second": 3.421,
      "step": 62880
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.7001176844783717e-05,
      "loss": 1.0985,
      "step": 94320
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.099130392074585,
      "eval_runtime": 11.4603,
      "eval_samples_per_second": 856.436,
      "eval_steps_per_second": 3.403,
      "step": 94320
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.6001685750636135e-05,
      "loss": 1.0985,
      "step": 125760
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0990595817565918,
      "eval_runtime": 11.4198,
      "eval_samples_per_second": 859.471,
      "eval_steps_per_second": 3.415,
      "step": 125760
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.5002131043257e-05,
      "loss": 1.0985,
      "step": 157200
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987752676010132,
      "eval_runtime": 11.4346,
      "eval_samples_per_second": 858.363,
      "eval_steps_per_second": 3.411,
      "step": 157200
    },
    {
      "epoch": 6.0,
      "learning_rate": 4.4002639949109414e-05,
      "loss": 1.0985,
      "step": 188640
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987842082977295,
      "eval_runtime": 11.6007,
      "eval_samples_per_second": 846.071,
      "eval_steps_per_second": 3.362,
      "step": 188640
    },
    {
      "epoch": 7.0,
      "learning_rate": 4.300311704834606e-05,
      "loss": 1.0985,
      "step": 220080
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0988376140594482,
      "eval_runtime": 11.7431,
      "eval_samples_per_second": 835.809,
      "eval_steps_per_second": 3.321,
      "step": 220080
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.200356234096692e-05,
      "loss": 1.0985,
      "step": 251520
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0988106727600098,
      "eval_runtime": 11.6275,
      "eval_samples_per_second": 844.121,
      "eval_steps_per_second": 3.354,
      "step": 251520
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.100400763358779e-05,
      "loss": 1.0985,
      "step": 282960
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987931489944458,
      "eval_runtime": 11.4012,
      "eval_samples_per_second": 860.872,
      "eval_steps_per_second": 3.421,
      "step": 282960
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.000445292620865e-05,
      "loss": 1.0985,
      "step": 314400
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987577438354492,
      "eval_runtime": 11.4144,
      "eval_samples_per_second": 859.88,
      "eval_steps_per_second": 3.417,
      "step": 314400
    },
    {
      "epoch": 11.0,
      "learning_rate": 3.9004930025445295e-05,
      "loss": 1.0985,
      "step": 345840
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0988106727600098,
      "eval_runtime": 11.4047,
      "eval_samples_per_second": 860.606,
      "eval_steps_per_second": 3.42,
      "step": 345840
    },
    {
      "epoch": 12.0,
      "learning_rate": 3.800537531806616e-05,
      "loss": 1.0985,
      "step": 377280
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0988460779190063,
      "eval_runtime": 11.4535,
      "eval_samples_per_second": 856.946,
      "eval_steps_per_second": 3.405,
      "step": 377280
    },
    {
      "epoch": 13.0,
      "learning_rate": 3.70058524173028e-05,
      "loss": 1.0985,
      "step": 408720
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987931489944458,
      "eval_runtime": 11.41,
      "eval_samples_per_second": 860.209,
      "eval_steps_per_second": 3.418,
      "step": 408720
    },
    {
      "epoch": 14.0,
      "learning_rate": 3.600636132315522e-05,
      "loss": 1.0985,
      "step": 440160
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0988106727600098,
      "eval_runtime": 11.4231,
      "eval_samples_per_second": 859.227,
      "eval_steps_per_second": 3.414,
      "step": 440160
    },
    {
      "epoch": 15.0,
      "learning_rate": 3.500680661577608e-05,
      "loss": 1.0985,
      "step": 471600
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.31818644931227713,
      "eval_loss": 1.0987842082977295,
      "eval_runtime": 11.3973,
      "eval_samples_per_second": 861.165,
      "eval_steps_per_second": 3.422,
      "step": 471600
    },
    {
      "epoch": 15.0,
      "step": 471600,
      "total_flos": 1.5037282485912207e+19,
      "train_loss": 1.0985465619036259,
      "train_runtime": 409336.4634,
      "train_samples_per_second": 983.123,
      "train_steps_per_second": 3.84
    }
  ],
  "max_steps": 1572000,
  "num_train_epochs": 50,
  "total_flos": 1.5037282485912207e+19,
  "trial_name": null,
  "trial_params": null
}