{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.846153846153847,
"eval_steps": 500,
"global_step": 480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20512820512820512,
"grad_norm": 0.68310546875,
"learning_rate": 0.00019978589232386035,
"loss": 1.5545,
"step": 10
},
{
"epoch": 0.41025641025641024,
"grad_norm": 0.71240234375,
"learning_rate": 0.00019914448613738106,
"loss": 1.4203,
"step": 20
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.6708984375,
"learning_rate": 0.00019807852804032305,
"loss": 1.4322,
"step": 30
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.69482421875,
"learning_rate": 0.00019659258262890683,
"loss": 1.377,
"step": 40
},
{
"epoch": 1.0256410256410255,
"grad_norm": 0.62890625,
"learning_rate": 0.0001946930129495106,
"loss": 1.3246,
"step": 50
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.85107421875,
"learning_rate": 0.0001923879532511287,
"loss": 1.2329,
"step": 60
},
{
"epoch": 1.435897435897436,
"grad_norm": 0.7978515625,
"learning_rate": 0.00018968727415326884,
"loss": 1.1856,
"step": 70
},
{
"epoch": 1.641025641025641,
"grad_norm": 0.8544921875,
"learning_rate": 0.00018660254037844388,
"loss": 1.2045,
"step": 80
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.8330078125,
"learning_rate": 0.00018314696123025454,
"loss": 1.1925,
"step": 90
},
{
"epoch": 2.051282051282051,
"grad_norm": 0.8515625,
"learning_rate": 0.00017933533402912354,
"loss": 1.159,
"step": 100
},
{
"epoch": 2.2564102564102564,
"grad_norm": 0.87353515625,
"learning_rate": 0.00017518398074789775,
"loss": 0.9939,
"step": 110
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.0078125,
"learning_rate": 0.00017071067811865476,
"loss": 1.0583,
"step": 120
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.9453125,
"learning_rate": 0.00016593458151000688,
"loss": 1.0258,
"step": 130
},
{
"epoch": 2.871794871794872,
"grad_norm": 0.9384765625,
"learning_rate": 0.00016087614290087208,
"loss": 1.0612,
"step": 140
},
{
"epoch": 3.076923076923077,
"grad_norm": 1.08984375,
"learning_rate": 0.00015555702330196023,
"loss": 1.0167,
"step": 150
},
{
"epoch": 3.282051282051282,
"grad_norm": 1.1005859375,
"learning_rate": 0.00015000000000000001,
"loss": 0.9019,
"step": 160
},
{
"epoch": 3.4871794871794872,
"grad_norm": 1.154296875,
"learning_rate": 0.00014422886902190014,
"loss": 0.8952,
"step": 170
},
{
"epoch": 3.6923076923076925,
"grad_norm": 1.06640625,
"learning_rate": 0.000138268343236509,
"loss": 0.9395,
"step": 180
},
{
"epoch": 3.8974358974358974,
"grad_norm": 1.1083984375,
"learning_rate": 0.00013214394653031616,
"loss": 0.9138,
"step": 190
},
{
"epoch": 4.102564102564102,
"grad_norm": 1.240234375,
"learning_rate": 0.00012588190451025207,
"loss": 0.8003,
"step": 200
},
{
"epoch": 4.3076923076923075,
"grad_norm": 1.1552734375,
"learning_rate": 0.00011950903220161285,
"loss": 0.7733,
"step": 210
},
{
"epoch": 4.512820512820513,
"grad_norm": 1.0068359375,
"learning_rate": 0.00011305261922200519,
"loss": 0.7342,
"step": 220
},
{
"epoch": 4.717948717948718,
"grad_norm": 1.271484375,
"learning_rate": 0.00010654031292301432,
"loss": 0.7787,
"step": 230
},
{
"epoch": 4.923076923076923,
"grad_norm": 1.1484375,
"learning_rate": 0.0001,
"loss": 0.8549,
"step": 240
},
{
"epoch": 5.128205128205128,
"grad_norm": 1.3203125,
"learning_rate": 9.345968707698569e-05,
"loss": 0.748,
"step": 250
},
{
"epoch": 5.333333333333333,
"grad_norm": 1.296875,
"learning_rate": 8.694738077799488e-05,
"loss": 0.6502,
"step": 260
},
{
"epoch": 5.538461538461538,
"grad_norm": 1.18359375,
"learning_rate": 8.049096779838719e-05,
"loss": 0.678,
"step": 270
},
{
"epoch": 5.743589743589744,
"grad_norm": 1.2099609375,
"learning_rate": 7.411809548974792e-05,
"loss": 0.6956,
"step": 280
},
{
"epoch": 5.948717948717949,
"grad_norm": 1.3115234375,
"learning_rate": 6.785605346968386e-05,
"loss": 0.7146,
"step": 290
},
{
"epoch": 6.153846153846154,
"grad_norm": 1.2412109375,
"learning_rate": 6.173165676349103e-05,
"loss": 0.6016,
"step": 300
},
{
"epoch": 6.358974358974359,
"grad_norm": 1.318359375,
"learning_rate": 5.577113097809989e-05,
"loss": 0.6325,
"step": 310
},
{
"epoch": 6.564102564102564,
"grad_norm": 1.3349609375,
"learning_rate": 5.000000000000002e-05,
"loss": 0.6448,
"step": 320
},
{
"epoch": 6.769230769230769,
"grad_norm": 1.2177734375,
"learning_rate": 4.444297669803981e-05,
"loss": 0.607,
"step": 330
},
{
"epoch": 6.9743589743589745,
"grad_norm": 1.166015625,
"learning_rate": 3.9123857099127936e-05,
"loss": 0.6006,
"step": 340
},
{
"epoch": 7.17948717948718,
"grad_norm": 1.2724609375,
"learning_rate": 3.406541848999312e-05,
"loss": 0.5849,
"step": 350
},
{
"epoch": 7.384615384615385,
"grad_norm": 1.4111328125,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.6052,
"step": 360
},
{
"epoch": 7.589743589743589,
"grad_norm": 1.2001953125,
"learning_rate": 2.4816019252102273e-05,
"loss": 0.5591,
"step": 370
},
{
"epoch": 7.794871794871795,
"grad_norm": 1.3798828125,
"learning_rate": 2.0664665970876496e-05,
"loss": 0.5398,
"step": 380
},
{
"epoch": 8.0,
"grad_norm": 1.2548828125,
"learning_rate": 1.6853038769745467e-05,
"loss": 0.531,
"step": 390
},
{
"epoch": 8.205128205128204,
"grad_norm": 1.146484375,
"learning_rate": 1.339745962155613e-05,
"loss": 0.5311,
"step": 400
},
{
"epoch": 8.41025641025641,
"grad_norm": 1.3310546875,
"learning_rate": 1.0312725846731175e-05,
"loss": 0.5371,
"step": 410
},
{
"epoch": 8.615384615384615,
"grad_norm": 1.365234375,
"learning_rate": 7.612046748871327e-06,
"loss": 0.511,
"step": 420
},
{
"epoch": 8.820512820512821,
"grad_norm": 1.302734375,
"learning_rate": 5.306987050489442e-06,
"loss": 0.5505,
"step": 430
},
{
"epoch": 9.025641025641026,
"grad_norm": 1.3994140625,
"learning_rate": 3.40741737109318e-06,
"loss": 0.556,
"step": 440
},
{
"epoch": 9.23076923076923,
"grad_norm": 1.3046875,
"learning_rate": 1.921471959676957e-06,
"loss": 0.5174,
"step": 450
},
{
"epoch": 9.435897435897436,
"grad_norm": 1.337890625,
"learning_rate": 8.555138626189618e-07,
"loss": 0.5164,
"step": 460
},
{
"epoch": 9.64102564102564,
"grad_norm": 1.2578125,
"learning_rate": 2.141076761396521e-07,
"loss": 0.498,
"step": 470
},
{
"epoch": 9.846153846153847,
"grad_norm": 1.3984375,
"learning_rate": 0.0,
"loss": 0.5447,
"step": 480
},
{
"epoch": 9.846153846153847,
"step": 480,
"total_flos": 3.1522610479104e+16,
"train_loss": 0.833039856950442,
"train_runtime": 560.469,
"train_samples_per_second": 3.479,
"train_steps_per_second": 0.856
}
],
"logging_steps": 10,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 3.1522610479104e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}