{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.15594541910331383,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 5e-05,
"loss": 1.0506,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 0.0001,
"loss": 0.9988,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 0.00015000000000000001,
"loss": 0.9783,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.9849,
"step": 4
},
{
"epoch": 0.02,
"learning_rate": 0.00025,
"loss": 1.0159,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 0.00030000000000000003,
"loss": 0.9847,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 0.00034999999999999994,
"loss": 0.9101,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 0.0004,
"loss": 0.9445,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 0.00045,
"loss": 0.8578,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 0.0005,
"loss": 0.9356,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 0.0005499999999999999,
"loss": 0.8395,
"step": 11
},
{
"epoch": 0.04,
"learning_rate": 0.0006000000000000001,
"loss": 0.9002,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 0.00065,
"loss": 0.8955,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 0.0006499959204043461,
"loss": 0.902,
"step": 14
},
{
"epoch": 0.05,
"learning_rate": 0.0006499836817198032,
"loss": 0.8578,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 0.0006499632842536263,
"loss": 0.9005,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 0.0006499347285178979,
"loss": 0.8539,
"step": 17
},
{
"epoch": 0.06,
"learning_rate": 0.0006498980152295153,
"loss": 0.8595,
"step": 18
},
{
"epoch": 0.06,
"learning_rate": 0.0006498531453101735,
"loss": 0.8845,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 0.0006498001198863406,
"loss": 0.8924,
"step": 20
},
{
"epoch": 0.07,
"learning_rate": 0.000649738940289231,
"loss": 0.8365,
"step": 21
},
{
"epoch": 0.07,
"learning_rate": 0.0006496696080547707,
"loss": 0.8462,
"step": 22
},
{
"epoch": 0.07,
"learning_rate": 0.0006495921249235596,
"loss": 0.8528,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 0.0006495064928408277,
"loss": 0.8159,
"step": 24
},
{
"epoch": 0.08,
"learning_rate": 0.0006494127139563859,
"loss": 0.8245,
"step": 25
},
{
"epoch": 0.08,
"learning_rate": 0.000649310790624572,
"loss": 0.8081,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 0.0006492007254041924,
"loss": 0.8535,
"step": 27
},
{
"epoch": 0.09,
"learning_rate": 0.0006490825210584566,
"loss": 0.8162,
"step": 28
},
{
"epoch": 0.09,
"learning_rate": 0.0006489561805549089,
"loss": 0.8456,
"step": 29
},
{
"epoch": 0.09,
"learning_rate": 0.0006488217070653535,
"loss": 0.7799,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 0.0006486791039657748,
"loss": 0.8088,
"step": 31
},
{
"epoch": 0.1,
"learning_rate": 0.0006485283748362524,
"loss": 0.8683,
"step": 32
},
{
"epoch": 0.1,
"learning_rate": 0.0006483695234608723,
"loss": 0.8871,
"step": 33
},
{
"epoch": 0.11,
"learning_rate": 0.0006482025538276304,
"loss": 0.7711,
"step": 34
},
{
"epoch": 0.11,
"learning_rate": 0.0006480274701283335,
"loss": 0.7621,
"step": 35
},
{
"epoch": 0.11,
"learning_rate": 0.0006478442767584937,
"loss": 0.8243,
"step": 36
},
{
"epoch": 0.12,
"learning_rate": 0.0006476529783172177,
"loss": 0.8257,
"step": 37
},
{
"epoch": 0.12,
"learning_rate": 0.0006474535796070919,
"loss": 0.8141,
"step": 38
},
{
"epoch": 0.12,
"learning_rate": 0.0006472460856340619,
"loss": 0.8109,
"step": 39
},
{
"epoch": 0.12,
"learning_rate": 0.000647030501607306,
"loss": 0.7873,
"step": 40
},
{
"epoch": 0.13,
"learning_rate": 0.000646806832939105,
"loss": 0.7386,
"step": 41
},
{
"epoch": 0.13,
"learning_rate": 0.0006465750852447068,
"loss": 0.8636,
"step": 42
},
{
"epoch": 0.13,
"learning_rate": 0.0006463352643421846,
"loss": 0.7357,
"step": 43
},
{
"epoch": 0.14,
"learning_rate": 0.0006460873762522906,
"loss": 0.8142,
"step": 44
},
{
"epoch": 0.14,
"learning_rate": 0.0006458314271983063,
"loss": 0.7275,
"step": 45
},
{
"epoch": 0.14,
"learning_rate": 0.0006455674236058847,
"loss": 0.8029,
"step": 46
},
{
"epoch": 0.15,
"learning_rate": 0.00064529537210289,
"loss": 0.7901,
"step": 47
},
{
"epoch": 0.15,
"learning_rate": 0.0006450152795192307,
"loss": 0.7788,
"step": 48
},
{
"epoch": 0.15,
"learning_rate": 0.0006447271528866881,
"loss": 0.7621,
"step": 49
},
{
"epoch": 0.16,
"learning_rate": 0.0006444309994387402,
"loss": 0.7537,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 640,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 1.2960384418676736e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}