{
  "best_metric": 2.4635186195373535,
  "best_model_checkpoint": "./drive/MyDrive/peptide_esm/checkpoint-10488",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 10488,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 0.7004187107086182,
      "learning_rate": 7.5e-07,
      "loss": 2.5303,
      "step": 500
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.7539237141609192,
      "learning_rate": 1.5e-06,
      "loss": 2.5119,
      "step": 1000
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.9745526313781738,
      "learning_rate": 2.25e-06,
      "loss": 2.4899,
      "step": 1500
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.6661661863327026,
      "learning_rate": 3e-06,
      "loss": 2.4903,
      "step": 2000
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.7792529463768005,
      "learning_rate": 2.9380677126341868e-06,
      "loss": 2.4831,
      "step": 2500
    },
    {
      "epoch": 0.57,
      "grad_norm": 2.211763381958008,
      "learning_rate": 2.8762592898431047e-06,
      "loss": 2.4785,
      "step": 3000
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.7369110584259033,
      "learning_rate": 2.814450867052023e-06,
      "loss": 2.4763,
      "step": 3500
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.8026860952377319,
      "learning_rate": 2.75251857968621e-06,
      "loss": 2.4702,
      "step": 4000
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.5967105627059937,
      "learning_rate": 2.6905862923203965e-06,
      "loss": 2.4703,
      "step": 4500
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.68107670545578,
      "learning_rate": 2.6286540049545828e-06,
      "loss": 2.4699,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.46760630607605,
      "eval_runtime": 181.2222,
      "eval_samples_per_second": 823.095,
      "eval_steps_per_second": 6.434,
      "step": 5244
    },
    {
      "epoch": 1.05,
      "grad_norm": 1.064468502998352,
      "learning_rate": 2.56672171758877e-06,
      "loss": 2.4671,
      "step": 5500
    },
    {
      "epoch": 1.14,
      "grad_norm": 1.5440330505371094,
      "learning_rate": 2.5047894302229566e-06,
      "loss": 2.4663,
      "step": 6000
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.913020133972168,
      "learning_rate": 2.442857142857143e-06,
      "loss": 2.4654,
      "step": 6500
    },
    {
      "epoch": 1.33,
      "grad_norm": 0.8316154479980469,
      "learning_rate": 2.3810487200660612e-06,
      "loss": 2.4694,
      "step": 7000
    },
    {
      "epoch": 1.43,
      "grad_norm": 0.6185809969902039,
      "learning_rate": 2.3191164327002475e-06,
      "loss": 2.4668,
      "step": 7500
    },
    {
      "epoch": 1.53,
      "grad_norm": 1.1215540170669556,
      "learning_rate": 2.2571841453344346e-06,
      "loss": 2.4668,
      "step": 8000
    },
    {
      "epoch": 1.62,
      "grad_norm": 0.6679463982582092,
      "learning_rate": 2.195251857968621e-06,
      "loss": 2.4681,
      "step": 8500
    },
    {
      "epoch": 1.72,
      "grad_norm": 1.1260002851486206,
      "learning_rate": 2.1333195706028075e-06,
      "loss": 2.4639,
      "step": 9000
    },
    {
      "epoch": 1.81,
      "grad_norm": 0.6827268004417419,
      "learning_rate": 2.0713872832369942e-06,
      "loss": 2.4631,
      "step": 9500
    },
    {
      "epoch": 1.91,
      "grad_norm": 0.7196531891822815,
      "learning_rate": 2.0095788604459126e-06,
      "loss": 2.4644,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.4635186195373535,
      "eval_runtime": 180.8599,
      "eval_samples_per_second": 824.744,
      "eval_steps_per_second": 6.447,
      "step": 10488
    }
  ],
  "logging_steps": 500,
  "max_steps": 26220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.1949317916310464e+17,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}