{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"learning_rate": 9.896907216494846e-05,
"loss": 1.6184,
"step": 64
},
{
"epoch": 0.4,
"learning_rate": 9.484536082474227e-05,
"loss": 1.0842,
"step": 128
},
{
"epoch": 0.6,
"learning_rate": 9.072164948453609e-05,
"loss": 1.0218,
"step": 192
},
{
"epoch": 0.8,
"learning_rate": 8.65979381443299e-05,
"loss": 1.0143,
"step": 256
},
{
"epoch": 1.0,
"learning_rate": 8.247422680412371e-05,
"loss": 0.9605,
"step": 320
},
{
"epoch": 1.2,
"learning_rate": 7.835051546391753e-05,
"loss": 0.9059,
"step": 384
},
{
"epoch": 1.4,
"learning_rate": 7.422680412371135e-05,
"loss": 0.8852,
"step": 448
},
{
"epoch": 1.6,
"learning_rate": 7.010309278350515e-05,
"loss": 0.8474,
"step": 512
},
{
"epoch": 1.8,
"learning_rate": 6.597938144329897e-05,
"loss": 0.8894,
"step": 576
},
{
"epoch": 2.0,
"learning_rate": 6.185567010309279e-05,
"loss": 0.861,
"step": 640
},
{
"epoch": 2.2,
"learning_rate": 5.7731958762886594e-05,
"loss": 0.8004,
"step": 704
},
{
"epoch": 2.4,
"learning_rate": 5.360824742268041e-05,
"loss": 0.7686,
"step": 768
},
{
"epoch": 2.6,
"learning_rate": 4.948453608247423e-05,
"loss": 0.7503,
"step": 832
},
{
"epoch": 2.8,
"learning_rate": 4.536082474226804e-05,
"loss": 0.7802,
"step": 896
},
{
"epoch": 3.0,
"learning_rate": 4.1237113402061855e-05,
"loss": 0.7627,
"step": 960
},
{
"epoch": 3.2,
"learning_rate": 3.7113402061855674e-05,
"loss": 0.6986,
"step": 1024
},
{
"epoch": 3.4,
"learning_rate": 3.2989690721649485e-05,
"loss": 0.6726,
"step": 1088
},
{
"epoch": 3.6,
"learning_rate": 2.8865979381443297e-05,
"loss": 0.6931,
"step": 1152
},
{
"epoch": 3.8,
"learning_rate": 2.4742268041237116e-05,
"loss": 0.7089,
"step": 1216
},
{
"epoch": 4.0,
"learning_rate": 2.0618556701030927e-05,
"loss": 0.6627,
"step": 1280
},
{
"epoch": 4.2,
"learning_rate": 1.6494845360824743e-05,
"loss": 0.6304,
"step": 1344
},
{
"epoch": 4.4,
"learning_rate": 1.2371134020618558e-05,
"loss": 0.6246,
"step": 1408
},
{
"epoch": 4.6,
"learning_rate": 8.247422680412371e-06,
"loss": 0.6272,
"step": 1472
},
{
"epoch": 4.8,
"learning_rate": 4.123711340206186e-06,
"loss": 0.628,
"step": 1536
},
{
"epoch": 5.0,
"learning_rate": 0.0,
"loss": 0.6033,
"step": 1600
}
],
"logging_steps": 64,
"max_steps": 1600,
"num_train_epochs": 5,
"save_steps": 64,
"total_flos": 1.0172436457734144e+17,
"trial_name": null,
"trial_params": null
}