{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.19692792437967704,
  "eval_steps": 500,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.00017015520980041642,
      "loss": 0.5575,
      "step": 100
    },
    {
      "epoch": 0.0,
      "learning_rate": 0.00017015035788629153,
      "loss": 0.3065,
      "step": 200
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00017014224985982174,
      "loss": 0.2229,
      "step": 300
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001701308860313402,
      "loss": 0.1812,
      "step": 400
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00017011626683579524,
      "loss": 0.1572,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00017009839283273364,
      "loss": 0.1335,
      "step": 600
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00017007726470627936,
      "loss": 0.1193,
      "step": 700
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00017005288326510734,
      "loss": 0.1075,
      "step": 800
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001700252494424124,
      "loss": 0.0986,
      "step": 900
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016999436429587366,
      "loss": 0.0952,
      "step": 1000
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016996022900761407,
      "loss": 0.0865,
      "step": 1100
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.000169922844884155,
      "loss": 0.0816,
      "step": 1200
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00016988221335636648,
      "loss": 0.0775,
      "step": 1300
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00016983833597941224,
      "loss": 0.0744,
      "step": 1400
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00016979121443269025,
      "loss": 0.0694,
      "step": 1500
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001697408505197684,
      "loss": 0.0641,
      "step": 1600
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00016968724616831557,
      "loss": 0.0629,
      "step": 1700
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001696304034300278,
      "loss": 0.0696,
      "step": 1800
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00016957032448054968,
      "loss": 0.0591,
      "step": 1900
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001695070116193912,
      "loss": 0.06,
      "step": 2000
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001694404672698396,
      "loss": 0.0558,
      "step": 2100
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00016937069397886687,
      "loss": 0.0529,
      "step": 2200
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00016929769441703196,
      "loss": 0.0536,
      "step": 2300
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00016922147137837868,
      "loss": 0.0537,
      "step": 2400
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00016914202778032893,
      "loss": 0.0508,
      "step": 2500
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001690593666635707,
      "loss": 0.0524,
      "step": 2600
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00016897349119194207,
      "loss": 0.0494,
      "step": 2700
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00016888440465230977,
      "loss": 0.047,
      "step": 2800
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00016879211045444354,
      "loss": 0.0502,
      "step": 2900
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00016869661213088575,
      "loss": 0.046,
      "step": 3000
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00016859791333681583,
      "loss": 0.0419,
      "step": 3100
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0001684960178499108,
      "loss": 0.0456,
      "step": 3200
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00016839092957020028,
      "loss": 0.0432,
      "step": 3300
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016828265251991761,
      "loss": 0.0423,
      "step": 3400
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016817119084334555,
      "loss": 0.0407,
      "step": 3500
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016805654880665776,
      "loss": 0.0409,
      "step": 3600
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016793873079775577,
      "loss": 0.0397,
      "step": 3700
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016781774132610059,
      "loss": 0.0419,
      "step": 3800
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001676935850225405,
      "loss": 0.0399,
      "step": 3900
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00016756626663913358,
      "loss": 0.0389,
      "step": 4000
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00016743579104896593,
      "loss": 0.0385,
      "step": 4100
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00016730216324596504,
      "loss": 0.0357,
      "step": 4200
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001671653883447088,
      "loss": 0.0373,
      "step": 4300
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00016702547158022968,
      "loss": 0.0375,
      "step": 4400
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001668824183078143,
      "loss": 0.0393,
      "step": 4500
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00016673623400279849,
      "loss": 0.0358,
      "step": 4600
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00016658692426035782,
      "loss": 0.0334,
      "step": 4700
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00016643449479529325,
      "loss": 0.035,
      "step": 4800
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00016627895144181258,
      "loss": 0.0343,
      "step": 4900
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001661203001533071,
      "loss": 0.0335,
      "step": 5000
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00016595854700212362,
      "loss": 0.0353,
      "step": 5100
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001657936981793322,
      "loss": 0.0331,
      "step": 5200
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001656257599944891,
      "loss": 0.0387,
      "step": 5300
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00016545473887539532,
      "loss": 0.0327,
      "step": 5400
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00016528064136785056,
      "loss": 0.0316,
      "step": 5500
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00016510347413540262,
      "loss": 0.0319,
      "step": 5600
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001649232439590925,
      "loss": 0.0314,
      "step": 5700
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001647399577371947,
      "loss": 0.0321,
      "step": 5800
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00016455362248495338,
      "loss": 0.0333,
      "step": 5900
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00016436424533431362,
      "loss": 0.0319,
      "step": 6000
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001641718335336486,
      "loss": 0.0315,
      "step": 6100
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001639763944474821,
      "loss": 0.0311,
      "step": 6200
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001637779355562068,
      "loss": 0.031,
      "step": 6300
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00016357646445579763,
      "loss": 0.0299,
      "step": 6400
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00016337198885752133,
      "loss": 0.0317,
      "step": 6500
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00016316451658764122,
      "loss": 0.0302,
      "step": 6600
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001629540555871176,
      "loss": 0.0295,
      "step": 6700
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00016274061391130388,
      "loss": 0.03,
      "step": 6800
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001625241997296382,
      "loss": 0.0292,
      "step": 6900
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00016230482132533077,
      "loss": 0.0289,
      "step": 7000
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00016208472504084003,
      "loss": 0.0318,
      "step": 7100
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001618594729250462,
      "loss": 0.0301,
      "step": 7200
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00016163128202889828,
      "loss": 0.0295,
      "step": 7300
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016140016108635798,
      "loss": 0.029,
      "step": 7400
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016116611894353386,
      "loss": 0.0291,
      "step": 7500
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016092916455834295,
      "loss": 0.0311,
      "step": 7600
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016068930700016766,
      "loss": 0.0285,
      "step": 7700
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016044655544950889,
      "loss": 0.0287,
      "step": 7800
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016020091919763445,
      "loss": 0.0293,
      "step": 7900
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00015995240764622357,
      "loss": 0.0259,
      "step": 8000
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000159701030307007,
      "loss": 0.0293,
      "step": 8100
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00015944679680140295,
      "loss": 0.0277,
      "step": 8200
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001591897168601488,
      "loss": 0.0304,
      "step": 8300
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00015892980032292876,
      "loss": 0.026,
      "step": 8400
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00015866705713799714,
      "loss": 0.0294,
      "step": 8500
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00015840149736179762,
      "loss": 0.0321,
      "step": 8600
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001581331311585785,
      "loss": 0.0279,
      "step": 8700
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00015786196880000325,
      "loss": 0.0277,
      "step": 8800
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001575880206647579,
      "loss": 0.0268,
      "step": 8900
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00015731129723815343,
      "loss": 0.0281,
      "step": 9000
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00015703180911172453,
      "loss": 0.028,
      "step": 9100
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001567495669828243,
      "loss": 0.026,
      "step": 9200
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001564645816542146,
      "loss": 0.0256,
      "step": 9300
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001561768640336529,
      "loss": 0.027,
      "step": 9400
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001558864251334745,
      "loss": 0.0249,
      "step": 9500
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00015559327607017119,
      "loss": 0.0256,
      "step": 9600
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00015529742806396564,
      "loss": 0.0251,
      "step": 9700
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00015499889243838211,
      "loss": 0.0257,
      "step": 9800
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00015469768061981295,
      "loss": 0.0264,
      "step": 9900
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00015439685605389073,
      "loss": 0.0247,
      "step": 10000
    }
  ],
  "logging_steps": 100,
  "max_steps": 50780,
  "num_train_epochs": 1,
  "save_steps": 10000,
  "total_flos": 3.992837363712e+18,
  "trial_name": null,
  "trial_params": null
}