{
  "best_metric": 0.1931125248049425,
  "best_model_checkpoint": "esm2_t6_8M_qlora_binding_sites_2023-09-28_00-05-45/checkpoint-3939",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 3939,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 0.0003700096638060586,
      "loss": 0.4987,
      "step": 200
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0003695684724487918,
      "loss": 0.3756,
      "step": 400
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00036883393302294926,
      "loss": 0.3422,
      "step": 600
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0003678072134814851,
      "loss": 0.3223,
      "step": 800
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0003664899463577655,
      "loss": 0.3094,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00036488422616976233,
      "loss": 0.3001,
      "step": 1200
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0003629926060896693,
      "loss": 0.2916,
      "step": 1400
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00036081809388423475,
      "loss": 0.2847,
      "step": 1600
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0003583641471322691,
      "loss": 0.2787,
      "step": 1800
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00035563466772692797,
      "loss": 0.2725,
      "step": 2000
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0003526339956715147,
      "loss": 0.2699,
      "step": 2200
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00034936690217866705,
      "loss": 0.2668,
      "step": 2400
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00034583858208389975,
      "loss": 0.2636,
      "step": 2600
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0003420546455855667,
      "loss": 0.2604,
      "step": 2800
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00033802110932437625,
      "loss": 0.2577,
      "step": 3000
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0003337443868166437,
      "loss": 0.2545,
      "step": 3200
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00032923127825649205,
      "loss": 0.2515,
      "step": 3400
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00032448895970321745,
      "loss": 0.2518,
      "step": 3600
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0003195503301009428,
      "loss": 0.2465,
      "step": 3800
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8584993872301376,
      "eval_auc": 0.8182032038033911,
      "eval_f1": 0.1931125248049425,
      "eval_loss": 0.4206424355506897,
      "eval_mcc": 0.25789003640920655,
      "eval_precision": 0.11027656492418843,
      "eval_recall": 0.7760683791635847,
      "eval_runtime": 1158.8552,
      "eval_samples_per_second": 110.153,
      "eval_steps_per_second": 1.722,
      "step": 3939
    }
  ],
  "logging_steps": 200,
  "max_steps": 15756,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 2.025377780531094e+16,
  "trial_name": null,
  "trial_params": null
}