{
  "best_metric": 0.19848246486644372,
  "best_model_checkpoint": "esm2_t6_8M_qlora_binding_sites_2023-09-29_03-34-40/checkpoint-3939",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 3939,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 0.0003700096638060586,
      "loss": 0.5009,
      "step": 200
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0003695684724487918,
      "loss": 0.3725,
      "step": 400
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00036883393302294926,
      "loss": 0.3363,
      "step": 600
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0003678072134814851,
      "loss": 0.3171,
      "step": 800
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0003664899463577655,
      "loss": 0.3041,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00036488422616976233,
      "loss": 0.2954,
      "step": 1200
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0003629926060896693,
      "loss": 0.2872,
      "step": 1400
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00036081809388423475,
      "loss": 0.2804,
      "step": 1600
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0003583641471322691,
      "loss": 0.2754,
      "step": 1800
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00035563466772692797,
      "loss": 0.2696,
      "step": 2000
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0003526339956715147,
      "loss": 0.2675,
      "step": 2200
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00034936690217866705,
      "loss": 0.2642,
      "step": 2400
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00034583858208389975,
      "loss": 0.262,
      "step": 2600
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0003420546455855667,
      "loss": 0.2592,
      "step": 2800
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00033802110932437625,
      "loss": 0.2565,
      "step": 3000
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0003337443868166437,
      "loss": 0.2542,
      "step": 3200
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00032923127825649205,
      "loss": 0.2509,
      "step": 3400
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00032448895970321745,
      "loss": 0.2509,
      "step": 3600
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0003195249716710097,
      "loss": 0.2464,
      "step": 3800
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8625347674451358,
      "eval_auc": 0.8222331548742136,
      "eval_f1": 0.19848246486644372,
      "eval_loss": 0.41490185260772705,
      "eval_mcc": 0.2639007297474409,
      "eval_precision": 0.11370668247419904,
      "eval_recall": 0.7800926533683039,
      "eval_runtime": 1163.712,
      "eval_samples_per_second": 109.693,
      "eval_steps_per_second": 1.714,
      "step": 3939
    }
  ],
  "logging_steps": 200,
  "max_steps": 15756,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 2.0257292207917116e+16,
  "trial_name": null,
  "trial_params": null
}