{
  "best_metric": 0.4915876892254164,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-2138",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 3207,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "grad_norm": 8.518925666809082,
      "learning_rate": 8.972995831812351e-06,
      "loss": 0.5555,
      "step": 500
    },
    {
      "epoch": 0.94,
      "grad_norm": 15.430549621582031,
      "learning_rate": 8.046990070015412e-06,
      "loss": 0.509,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5366330146789551,
      "eval_matthews_correlation": 0.4025454740592728,
      "eval_runtime": 0.8238,
      "eval_samples_per_second": 1266.155,
      "eval_steps_per_second": 80.121,
      "step": 1069
    },
    {
      "epoch": 1.4,
      "grad_norm": 22.634355545043945,
      "learning_rate": 7.120984308218471e-06,
      "loss": 0.3856,
      "step": 1500
    },
    {
      "epoch": 1.87,
      "grad_norm": 40.38935089111328,
      "learning_rate": 6.194978546421531e-06,
      "loss": 0.3925,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6002808213233948,
      "eval_matthews_correlation": 0.4915876892254164,
      "eval_runtime": 0.7672,
      "eval_samples_per_second": 1359.42,
      "eval_steps_per_second": 86.023,
      "step": 2138
    },
    {
      "epoch": 2.34,
      "grad_norm": 33.39309310913086,
      "learning_rate": 5.2689727846245905e-06,
      "loss": 0.3121,
      "step": 2500
    },
    {
      "epoch": 2.81,
      "grad_norm": 28.698448181152344,
      "learning_rate": 4.34296702282765e-06,
      "loss": 0.2998,
      "step": 3000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6921499371528625,
      "eval_matthews_correlation": 0.48652176899433314,
      "eval_runtime": 0.7643,
      "eval_samples_per_second": 1364.583,
      "eval_steps_per_second": 86.349,
      "step": 3207
    }
  ],
  "logging_steps": 500,
  "max_steps": 5345,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 112981496058780.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 9.899001593609293e-06,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 8,
    "seed": 30
  }
}