codegen2-7B-peft-lora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.028762080073630927,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.00019920000000000002,
"loss": 1.0145,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0001984,
"loss": 0.9763,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 0.0001976,
"loss": 0.9834,
"step": 30
},
{
"epoch": 0.0,
"learning_rate": 0.0001968,
"loss": 1.0162,
"step": 40
},
{
"epoch": 0.0,
"learning_rate": 0.000196,
"loss": 0.8783,
"step": 50
},
{
"epoch": 0.0,
"learning_rate": 0.0001952,
"loss": 1.0353,
"step": 60
},
{
"epoch": 0.0,
"learning_rate": 0.0001944,
"loss": 1.0571,
"step": 70
},
{
"epoch": 0.0,
"learning_rate": 0.00019360000000000002,
"loss": 0.9313,
"step": 80
},
{
"epoch": 0.01,
"learning_rate": 0.0001928,
"loss": 0.9828,
"step": 90
},
{
"epoch": 0.01,
"learning_rate": 0.000192,
"loss": 1.0549,
"step": 100
},
{
"epoch": 0.01,
"learning_rate": 0.0001912,
"loss": 1.0297,
"step": 110
},
{
"epoch": 0.01,
"learning_rate": 0.0001904,
"loss": 0.8975,
"step": 120
},
{
"epoch": 0.01,
"learning_rate": 0.0001896,
"loss": 0.9403,
"step": 130
},
{
"epoch": 0.01,
"learning_rate": 0.0001888,
"loss": 1.0143,
"step": 140
},
{
"epoch": 0.01,
"learning_rate": 0.000188,
"loss": 0.9905,
"step": 150
},
{
"epoch": 0.01,
"learning_rate": 0.00018720000000000002,
"loss": 0.931,
"step": 160
},
{
"epoch": 0.01,
"learning_rate": 0.00018640000000000003,
"loss": 1.0687,
"step": 170
},
{
"epoch": 0.01,
"learning_rate": 0.0001856,
"loss": 1.1008,
"step": 180
},
{
"epoch": 0.01,
"learning_rate": 0.00018480000000000002,
"loss": 1.0267,
"step": 190
},
{
"epoch": 0.01,
"learning_rate": 0.00018400000000000003,
"loss": 0.9282,
"step": 200
},
{
"epoch": 0.01,
"learning_rate": 0.0001832,
"loss": 0.9845,
"step": 210
},
{
"epoch": 0.01,
"learning_rate": 0.00018240000000000002,
"loss": 1.0082,
"step": 220
},
{
"epoch": 0.01,
"learning_rate": 0.00018160000000000002,
"loss": 0.9368,
"step": 230
},
{
"epoch": 0.01,
"learning_rate": 0.0001808,
"loss": 1.0412,
"step": 240
},
{
"epoch": 0.01,
"learning_rate": 0.00018,
"loss": 0.9716,
"step": 250
},
{
"epoch": 0.01,
"learning_rate": 0.00017920000000000002,
"loss": 0.978,
"step": 260
},
{
"epoch": 0.02,
"learning_rate": 0.0001784,
"loss": 1.0301,
"step": 270
},
{
"epoch": 0.02,
"learning_rate": 0.0001776,
"loss": 1.0605,
"step": 280
},
{
"epoch": 0.02,
"learning_rate": 0.00017680000000000001,
"loss": 0.9383,
"step": 290
},
{
"epoch": 0.02,
"learning_rate": 0.00017600000000000002,
"loss": 1.0683,
"step": 300
},
{
"epoch": 0.02,
"learning_rate": 0.0001752,
"loss": 1.0155,
"step": 310
},
{
"epoch": 0.02,
"learning_rate": 0.0001744,
"loss": 0.9944,
"step": 320
},
{
"epoch": 0.02,
"learning_rate": 0.00017360000000000002,
"loss": 0.9694,
"step": 330
},
{
"epoch": 0.02,
"learning_rate": 0.0001728,
"loss": 1.0904,
"step": 340
},
{
"epoch": 0.02,
"learning_rate": 0.000172,
"loss": 1.0099,
"step": 350
},
{
"epoch": 0.02,
"learning_rate": 0.00017120000000000001,
"loss": 0.9639,
"step": 360
},
{
"epoch": 0.02,
"learning_rate": 0.0001704,
"loss": 0.9827,
"step": 370
},
{
"epoch": 0.02,
"learning_rate": 0.0001696,
"loss": 0.9929,
"step": 380
},
{
"epoch": 0.02,
"learning_rate": 0.0001688,
"loss": 1.0025,
"step": 390
},
{
"epoch": 0.02,
"learning_rate": 0.000168,
"loss": 1.0533,
"step": 400
},
{
"epoch": 0.02,
"learning_rate": 0.0001672,
"loss": 0.9881,
"step": 410
},
{
"epoch": 0.02,
"learning_rate": 0.0001664,
"loss": 1.0109,
"step": 420
},
{
"epoch": 0.02,
"learning_rate": 0.0001656,
"loss": 0.9747,
"step": 430
},
{
"epoch": 0.03,
"learning_rate": 0.0001648,
"loss": 1.0304,
"step": 440
},
{
"epoch": 0.03,
"learning_rate": 0.000164,
"loss": 1.0282,
"step": 450
},
{
"epoch": 0.03,
"learning_rate": 0.0001632,
"loss": 0.9555,
"step": 460
},
{
"epoch": 0.03,
"learning_rate": 0.00016240000000000002,
"loss": 1.0386,
"step": 470
},
{
"epoch": 0.03,
"learning_rate": 0.00016160000000000002,
"loss": 0.931,
"step": 480
},
{
"epoch": 0.03,
"learning_rate": 0.0001608,
"loss": 0.9508,
"step": 490
},
{
"epoch": 0.03,
"learning_rate": 0.00016,
"loss": 0.9486,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 3.27221445132288e+17,
"trial_name": null,
"trial_params": null
}
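For reference, a minimal sketch of how a trainer_state.json like the one above can be loaded and summarized with the Python standard library. The keys (log_history, max_steps, loss, learning_rate, step) come from the file itself; the relative path "trainer_state.json" is an assumption about where the file sits next to the script, not something stated in this upload.

```python
import json

# Load the trainer state shown above (path assumed: file in the working directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry in log_history corresponds to one logging event (logging_steps = 10).
history = state["log_history"]

losses = [e["loss"] for e in history if "loss" in e]
steps = [e["step"] for e in history if "loss" in e]

print(f"logged steps: {steps[0]}..{steps[-1]} of max_steps={state['max_steps']}")
print(f"mean loss over logged window: {sum(losses) / len(losses):.4f}")
print(f"final learning rate: {history[-1]['learning_rate']}")
```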