{
  "best_metric": 0.3440490710592986,
  "best_model_checkpoint": "esm2_t12_35M_qlora_binding_sites_2023-09-30_06-27-21/checkpoint-5289",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 5289,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.0003700117150420716,
      "loss": 0.4265,
      "step": 200
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0003695766709151845,
      "loss": 0.2446,
      "step": 400
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00036885235529645174,
      "loss": 0.1964,
      "step": 600
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00036783990382720894,
      "loss": 0.1714,
      "step": 800
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000366540903911771,
      "loss": 0.1556,
      "step": 1000
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00036495739222857005,
      "loss": 0.1454,
      "step": 1200
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00036309185153688393,
      "loss": 0.1387,
      "step": 1400
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0003609472067841633,
      "loss": 0.133,
      "step": 1600
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0003585268205200591,
      "loss": 0.1289,
      "step": 1800
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0003558344876243421,
      "loss": 0.1228,
      "step": 2000
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0003528744293569798,
      "loss": 0.1208,
      "step": 2200
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00034965128673969907,
      "loss": 0.1167,
      "step": 2400
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0003461701132794125,
      "loss": 0.1131,
      "step": 2600
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0003424363670449164,
      "loss": 0.1113,
      "step": 2800
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00033845590210928363,
      "loss": 0.1088,
      "step": 3000
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00033423495937136847,
      "loss": 0.1081,
      "step": 3200
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00032978015677081454,
      "loss": 0.1054,
      "step": 3400
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00032509847891190683,
      "loss": 0.1029,
      "step": 3600
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0003201972661125376,
      "loss": 0.102,
      "step": 3800
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0003150842028954544,
      "loss": 0.0999,
      "step": 4000
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0003097673059398367,
      "loss": 0.098,
      "step": 4200
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00030425491151208934,
      "loss": 0.0972,
      "step": 4400
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00029855566239556173,
      "loss": 0.0968,
      "step": 4600
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0002926784943396845,
      "loss": 0.0947,
      "step": 4800
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00028663262204976965,
      "loss": 0.0933,
      "step": 5000
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00028042752473944106,
      "loss": 0.094,
      "step": 5200
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9297529150299436,
      "eval_auc": 0.8167222019799886,
      "eval_f1": 0.3440490710592986,
      "eval_loss": 0.6011912822723389,
      "eval_mcc": 0.3730152153022164,
      "eval_precision": 0.22835223718675476,
      "eval_recall": 0.697386656717114,
      "eval_runtime": 3756.1015,
      "eval_samples_per_second": 144.201,
      "eval_steps_per_second": 0.687,
      "step": 5289
    }
  ],
  "logging_steps": 200,
  "max_steps": 15867,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4.006517704836067e+17,
  "trial_name": null,
  "trial_params": null
}