{
  "best_metric": 2.256788492202759,
  "best_model_checkpoint": "distilbert_lda_5_v1_stsb/checkpoint-23",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 138,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.748433589935303,
      "learning_rate": 0.00098,
      "loss": 6.3938,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": NaN,
      "eval_loss": 2.256788492202759,
      "eval_pearson": NaN,
      "eval_runtime": 0.6384,
      "eval_samples_per_second": 2349.63,
      "eval_spearmanr": NaN,
      "eval_steps_per_second": 9.399,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 18.032630920410156,
      "learning_rate": 0.00096,
      "loss": 2.2292,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": NaN,
      "eval_loss": 2.2729742527008057,
      "eval_pearson": NaN,
      "eval_runtime": 0.636,
      "eval_samples_per_second": 2358.504,
      "eval_spearmanr": NaN,
      "eval_steps_per_second": 9.434,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.141530990600586,
      "learning_rate": 0.00094,
      "loss": 2.2087,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": -0.03073418981930874,
      "eval_loss": 2.3179895877838135,
      "eval_pearson": -0.030123554701908854,
      "eval_runtime": 0.6514,
      "eval_samples_per_second": 2302.65,
      "eval_spearmanr": -0.03134482493670863,
      "eval_steps_per_second": 9.211,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.1647350788116455,
      "learning_rate": 0.00092,
      "loss": 2.2043,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.009516354467436434,
      "eval_loss": 2.654869794845581,
      "eval_pearson": 0.007963371211781475,
      "eval_runtime": 0.6535,
      "eval_samples_per_second": 2295.416,
      "eval_spearmanr": 0.011069337723091394,
      "eval_steps_per_second": 9.182,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 14.778202056884766,
      "learning_rate": 0.0009000000000000001,
      "loss": 2.2273,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.0019123765153302998,
      "eval_loss": 2.615886688232422,
      "eval_pearson": 0.0017619346721126088,
      "eval_runtime": 0.652,
      "eval_samples_per_second": 2300.625,
      "eval_spearmanr": 0.002062818358547991,
      "eval_steps_per_second": 9.202,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.5717430114746094,
      "learning_rate": 0.00088,
      "loss": 2.1946,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": NaN,
      "eval_loss": 2.3650248050689697,
      "eval_pearson": NaN,
      "eval_runtime": 0.6498,
      "eval_samples_per_second": 2308.569,
      "eval_spearmanr": NaN,
      "eval_steps_per_second": 9.234,
      "step": 138
    },
    {
      "epoch": 6.0,
      "step": 138,
      "total_flos": 2284624480859136.0,
      "train_loss": 2.9096765656402144,
      "train_runtime": 46.2023,
      "train_samples_per_second": 6221.552,
      "train_steps_per_second": 24.891
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2284624480859136.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}