{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9985185185185185,
"eval_steps": 500,
"global_step": 337,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.1301183917120682,
"learning_rate": 5.882352941176471e-06,
"loss": 1.2653,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.13423529665439626,
"learning_rate": 2.9411764705882354e-05,
"loss": 1.2371,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 0.23748667333650045,
"learning_rate": 5.882352941176471e-05,
"loss": 1.1776,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 0.31960744166968214,
"learning_rate": 8.823529411764706e-05,
"loss": 1.1745,
"step": 15
},
{
"epoch": 0.06,
"grad_norm": 0.30058599861267066,
"learning_rate": 0.00011764705882352942,
"loss": 1.0782,
"step": 20
},
{
"epoch": 0.07,
"grad_norm": 0.2768843491438946,
"learning_rate": 0.00014705882352941178,
"loss": 0.9767,
"step": 25
},
{
"epoch": 0.09,
"grad_norm": 0.3704908664562922,
"learning_rate": 0.00017647058823529413,
"loss": 0.9157,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 0.10417281565059555,
"learning_rate": 0.00019999462497359466,
"loss": 0.8196,
"step": 35
},
{
"epoch": 0.12,
"grad_norm": 0.08961437607599079,
"learning_rate": 0.00019980655971335945,
"loss": 0.7921,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 0.09488226487438857,
"learning_rate": 0.0001993503206718859,
"loss": 0.7887,
"step": 45
},
{
"epoch": 0.15,
"grad_norm": 0.08675837478301882,
"learning_rate": 0.0001986271337340182,
"loss": 0.7985,
"step": 50
},
{
"epoch": 0.16,
"grad_norm": 0.08072222689651035,
"learning_rate": 0.00019763894205636072,
"loss": 0.8127,
"step": 55
},
{
"epoch": 0.18,
"grad_norm": 0.08501281247826332,
"learning_rate": 0.00019638840084614182,
"loss": 0.8066,
"step": 60
},
{
"epoch": 0.19,
"grad_norm": 0.06341225515947009,
"learning_rate": 0.00019487887022684336,
"loss": 0.7701,
"step": 65
},
{
"epoch": 0.21,
"grad_norm": 0.06812259862629667,
"learning_rate": 0.00019311440620976597,
"loss": 0.7974,
"step": 70
},
{
"epoch": 0.22,
"grad_norm": 0.08010377161115362,
"learning_rate": 0.0001910997497957885,
"loss": 0.8191,
"step": 75
},
{
"epoch": 0.24,
"grad_norm": 0.06550467078105318,
"learning_rate": 0.0001888403142366049,
"loss": 0.7812,
"step": 80
},
{
"epoch": 0.25,
"grad_norm": 0.07253067486847224,
"learning_rate": 0.00018634217048966637,
"loss": 0.7492,
"step": 85
},
{
"epoch": 0.27,
"grad_norm": 0.07561648690728726,
"learning_rate": 0.00018361203090591071,
"loss": 0.7877,
"step": 90
},
{
"epoch": 0.28,
"grad_norm": 0.0788936704645067,
"learning_rate": 0.00018065723119410884,
"loss": 0.7929,
"step": 95
},
{
"epoch": 0.3,
"grad_norm": 0.0884884554192815,
"learning_rate": 0.000177485710710289,
"loss": 0.7806,
"step": 100
},
{
"epoch": 0.31,
"grad_norm": 0.06410506327210315,
"learning_rate": 0.0001741059911251997,
"loss": 0.7893,
"step": 105
},
{
"epoch": 0.33,
"grad_norm": 0.07945762656708655,
"learning_rate": 0.00017052715352713075,
"loss": 0.7685,
"step": 110
},
{
"epoch": 0.34,
"grad_norm": 0.07017317816275172,
"learning_rate": 0.00016675881402161536,
"loss": 0.7965,
"step": 115
},
{
"epoch": 0.36,
"grad_norm": 0.07892803161399417,
"learning_rate": 0.0001628110978935756,
"loss": 0.7909,
"step": 120
},
{
"epoch": 0.37,
"grad_norm": 0.07247197366009367,
"learning_rate": 0.0001586946124013354,
"loss": 0.7779,
"step": 125
},
{
"epoch": 0.39,
"grad_norm": 0.0794954580794324,
"learning_rate": 0.00015442041827560274,
"loss": 0.7758,
"step": 130
},
{
"epoch": 0.4,
"grad_norm": 0.0781410228289432,
"learning_rate": 0.00015000000000000001,
"loss": 0.7411,
"step": 135
},
{
"epoch": 0.41,
"grad_norm": 0.08217583726026866,
"learning_rate": 0.00014544523495299842,
"loss": 0.7923,
"step": 140
},
{
"epoch": 0.43,
"grad_norm": 0.07210384035280004,
"learning_rate": 0.00014076836149416887,
"loss": 0.7653,
"step": 145
},
{
"epoch": 0.44,
"grad_norm": 0.06743506313322914,
"learning_rate": 0.0001359819460805001,
"loss": 0.7475,
"step": 150
},
{
"epoch": 0.46,
"grad_norm": 0.06929443024377009,
"learning_rate": 0.00013109884950114007,
"loss": 0.7431,
"step": 155
},
{
"epoch": 0.47,
"grad_norm": 0.07591018161762071,
"learning_rate": 0.00012613219232128608,
"loss": 0.7871,
"step": 160
},
{
"epoch": 0.49,
"grad_norm": 0.08369691300496385,
"learning_rate": 0.00012109531962807332,
"loss": 0.7729,
"step": 165
},
{
"epoch": 0.5,
"grad_norm": 0.08089436127546482,
"learning_rate": 0.00011600176517318741,
"loss": 0.7707,
"step": 170
},
{
"epoch": 0.52,
"grad_norm": 0.0706473017125327,
"learning_rate": 0.00011086521500854745,
"loss": 0.7557,
"step": 175
},
{
"epoch": 0.53,
"grad_norm": 0.07845062376590645,
"learning_rate": 0.00010569947071276847,
"loss": 0.7878,
"step": 180
},
{
"epoch": 0.55,
"grad_norm": 0.08099917146584029,
"learning_rate": 0.00010051841230721065,
"loss": 0.7961,
"step": 185
},
{
"epoch": 0.56,
"grad_norm": 0.08620445155100019,
"learning_rate": 9.533596096125825e-05,
"loss": 0.7524,
"step": 190
},
{
"epoch": 0.58,
"grad_norm": 0.08862656104000743,
"learning_rate": 9.016604158703654e-05,
"loss": 0.757,
"step": 195
},
{
"epoch": 0.59,
"grad_norm": 0.08502821196989717,
"learning_rate": 8.502254542407186e-05,
"loss": 0.787,
"step": 200
},
{
"epoch": 0.61,
"grad_norm": 0.07651964157136036,
"learning_rate": 7.991929271442817e-05,
"loss": 0.7617,
"step": 205
},
{
"epoch": 0.62,
"grad_norm": 0.09084246580451502,
"learning_rate": 7.48699955686089e-05,
"loss": 0.7808,
"step": 210
},
{
"epoch": 0.64,
"grad_norm": 0.0732902588933958,
"learning_rate": 6.988822112200156e-05,
"loss": 0.7814,
"step": 215
},
{
"epoch": 0.65,
"grad_norm": 0.09648121763622272,
"learning_rate": 6.498735508086093e-05,
"loss": 0.7292,
"step": 220
},
{
"epoch": 0.67,
"grad_norm": 0.07551393443193218,
"learning_rate": 6.018056575578075e-05,
"loss": 0.7748,
"step": 225
},
{
"epoch": 0.68,
"grad_norm": 0.09161304263255975,
"learning_rate": 5.54807686792933e-05,
"loss": 0.7923,
"step": 230
},
{
"epoch": 0.7,
"grad_norm": 0.09059355787494829,
"learning_rate": 5.090059190266779e-05,
"loss": 0.7905,
"step": 235
},
{
"epoch": 0.71,
"grad_norm": 0.08085206031151301,
"learning_rate": 4.645234206515171e-05,
"loss": 0.8124,
"step": 240
},
{
"epoch": 0.73,
"grad_norm": 0.0797379987288294,
"learning_rate": 4.2147971326825966e-05,
"loss": 0.7692,
"step": 245
},
{
"epoch": 0.74,
"grad_norm": 0.09502633909434978,
"learning_rate": 3.79990452539225e-05,
"loss": 0.8073,
"step": 250
},
{
"epoch": 0.76,
"grad_norm": 0.08150518384411078,
"learning_rate": 3.401671174289469e-05,
"loss": 0.7554,
"step": 255
},
{
"epoch": 0.77,
"grad_norm": 0.08598678237692878,
"learning_rate": 3.021167106673928e-05,
"loss": 0.7474,
"step": 260
},
{
"epoch": 0.79,
"grad_norm": 0.07819439855358523,
"learning_rate": 2.659414712405398e-05,
"loss": 0.7576,
"step": 265
},
{
"epoch": 0.8,
"grad_norm": 0.08023891142221302,
"learning_rate": 2.3173859968081944e-05,
"loss": 0.7608,
"step": 270
},
{
"epoch": 0.81,
"grad_norm": 0.09088106944567358,
"learning_rate": 1.995999968955641e-05,
"loss": 0.7616,
"step": 275
},
{
"epoch": 0.83,
"grad_norm": 0.09414696185806744,
"learning_rate": 1.696120172352025e-05,
"loss": 0.7986,
"step": 280
},
{
"epoch": 0.84,
"grad_norm": 0.0827414529922064,
"learning_rate": 1.4185523646469822e-05,
"loss": 0.7681,
"step": 285
},
{
"epoch": 0.86,
"grad_norm": 0.09854416536124955,
"learning_rate": 1.1640423526166988e-05,
"loss": 0.7776,
"step": 290
},
{
"epoch": 0.87,
"grad_norm": 0.08611779666327021,
"learning_rate": 9.332739882292752e-06,
"loss": 0.7628,
"step": 295
},
{
"epoch": 0.89,
"grad_norm": 0.08869045110724108,
"learning_rate": 7.2686733117863784e-06,
"loss": 0.7729,
"step": 300
},
{
"epoch": 0.9,
"grad_norm": 0.07919765304647886,
"learning_rate": 5.453769828241872e-06,
"loss": 0.7709,
"step": 305
},
{
"epoch": 0.92,
"grad_norm": 0.08336346515163112,
"learning_rate": 3.892905960127546e-06,
"loss": 0.7703,
"step": 310
},
{
"epoch": 0.93,
"grad_norm": 0.08196958012246881,
"learning_rate": 2.590275647868867e-06,
"loss": 0.7622,
"step": 315
},
{
"epoch": 0.95,
"grad_norm": 0.07692088564904832,
"learning_rate": 1.5493789750014031e-06,
"loss": 0.7952,
"step": 320
},
{
"epoch": 0.96,
"grad_norm": 0.08235291945346827,
"learning_rate": 7.730127636723539e-07,
"loss": 0.7722,
"step": 325
},
{
"epoch": 0.98,
"grad_norm": 0.07764082388173033,
"learning_rate": 2.6326305976001055e-07,
"loss": 0.7769,
"step": 330
},
{
"epoch": 0.99,
"grad_norm": 0.08728825013568611,
"learning_rate": 2.1499527803214846e-08,
"loss": 0.7585,
"step": 335
},
{
"epoch": 1.0,
"eval_loss": 0.9679195284843445,
"eval_runtime": 175.419,
"eval_samples_per_second": 13.168,
"eval_steps_per_second": 0.827,
"step": 337
},
{
"epoch": 1.0,
"step": 337,
"total_flos": 3852908399427584.0,
"train_loss": 0.08478880317699308,
"train_runtime": 740.7528,
"train_samples_per_second": 29.151,
"train_steps_per_second": 0.455
}
],
"logging_steps": 5,
"max_steps": 337,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 3852908399427584.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}