{
  "best_metric": 0.6188952922821045,
  "best_model_checkpoint": "bert_base_lda_20_v1_cola/checkpoint-68",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 238,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.4452758729457855,
      "learning_rate": 0.00098,
      "loss": 0.7571,
      "step": 34
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6196237802505493,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6803,
      "eval_samples_per_second": 1533.141,
      "eval_steps_per_second": 7.35,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9407540559768677,
      "learning_rate": 0.00096,
      "loss": 0.613,
      "step": 68
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6188952922821045,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6631,
      "eval_samples_per_second": 1572.981,
      "eval_steps_per_second": 7.541,
      "step": 68
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.0614078044891357,
      "learning_rate": 0.00094,
      "loss": 0.6097,
      "step": 102
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6194814443588257,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6519,
      "eval_samples_per_second": 1599.847,
      "eval_steps_per_second": 7.669,
      "step": 102
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.6363035440444946,
      "learning_rate": 0.00092,
      "loss": 0.6106,
      "step": 136
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6196293830871582,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6778,
      "eval_samples_per_second": 1538.719,
      "eval_steps_per_second": 7.376,
      "step": 136
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.9160279035568237,
      "learning_rate": 0.0009000000000000001,
      "loss": 0.6114,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6194814443588257,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6776,
      "eval_samples_per_second": 1539.156,
      "eval_steps_per_second": 7.379,
      "step": 170
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.48956847190856934,
      "learning_rate": 0.00088,
      "loss": 0.6094,
      "step": 204
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6189964413642883,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6726,
      "eval_samples_per_second": 1550.586,
      "eval_steps_per_second": 7.433,
      "step": 204
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.5084835290908813,
      "learning_rate": 0.00086,
      "loss": 0.6102,
      "step": 238
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.619966447353363,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.6632,
      "eval_samples_per_second": 1572.699,
      "eval_steps_per_second": 7.539,
      "step": 238
    },
    {
      "epoch": 7.0,
      "step": 238,
      "total_flos": 7874519220341760.0,
      "train_loss": 0.6316283650758887,
      "train_runtime": 116.2402,
      "train_samples_per_second": 3678.16,
      "train_steps_per_second": 14.625
    }
  ],
  "logging_steps": 1,
  "max_steps": 1700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7874519220341760.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}