llama3-poison-5p / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 163,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.0,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 0.0,
"learning_rate": 5.882352941176471e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.06,
"grad_norm": 0.0,
"learning_rate": 0.00011764705882352942,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.09,
"grad_norm": 0.0,
"learning_rate": 0.00017647058823529413,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.12,
"grad_norm": 0.0,
"learning_rate": 0.00019979171608653924,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": 0.0,
"learning_rate": 0.00019852201067560606,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.18,
"grad_norm": 0.0,
"learning_rate": 0.0001961129783872301,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.21,
"grad_norm": 0.0,
"learning_rate": 0.000192592477719385,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.25,
"grad_norm": 0.0,
"learning_rate": 0.00018800122039735358,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.28,
"grad_norm": 0.0,
"learning_rate": 0.00018239230057575542,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.31,
"grad_norm": 0.0,
"learning_rate": 0.00017583058084785625,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.34,
"grad_norm": 0.0,
"learning_rate": 0.00016839194216246108,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.37,
"grad_norm": 0.0,
"learning_rate": 0.00016016240632249224,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.4,
"grad_norm": 0.0,
"learning_rate": 0.0001512371412128424,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.43,
"grad_norm": 0.0,
"learning_rate": 0.00014171936026123168,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.46,
"grad_norm": 0.0,
"learning_rate": 0.00013171912885891063,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.49,
"grad_norm": 0.0,
"learning_rate": 0.00012135209154397962,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.52,
"grad_norm": 0.0,
"learning_rate": 0.00011073813466641632,
"loss": 0.0,
"step": 85
},
{
"epoch": 0.55,
"grad_norm": 0.0,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.58,
"grad_norm": 0.0,
"learning_rate": 8.92618653335837e-05,
"loss": 0.0,
"step": 95
},
{
"epoch": 0.61,
"grad_norm": 0.0,
"learning_rate": 7.864790845602039e-05,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.64,
"grad_norm": 0.09839651807560063,
"learning_rate": 6.82808711410894e-05,
"loss": 0.9838,
"step": 105
},
{
"epoch": 0.67,
"grad_norm": 0.10800711797719255,
"learning_rate": 5.828063973876834e-05,
"loss": 0.9641,
"step": 110
},
{
"epoch": 0.71,
"grad_norm": 0.10290376530473021,
"learning_rate": 4.876285878715764e-05,
"loss": 0.9273,
"step": 115
},
{
"epoch": 0.74,
"grad_norm": 0.10186139886850558,
"learning_rate": 3.9837593677507726e-05,
"loss": 0.9404,
"step": 120
},
{
"epoch": 0.77,
"grad_norm": 0.10144888390670637,
"learning_rate": 3.160805783753897e-05,
"loss": 0.9338,
"step": 125
},
{
"epoch": 0.8,
"grad_norm": 0.08752798479645085,
"learning_rate": 2.4169419152143768e-05,
"loss": 0.9566,
"step": 130
},
{
"epoch": 0.83,
"grad_norm": 0.09946507647649253,
"learning_rate": 1.7607699424244585e-05,
"loss": 0.9439,
"step": 135
},
{
"epoch": 0.86,
"grad_norm": 0.09362373509551941,
"learning_rate": 1.1998779602646437e-05,
"loss": 0.93,
"step": 140
},
{
"epoch": 0.89,
"grad_norm": 0.09941944273675073,
"learning_rate": 7.40752228061502e-06,
"loss": 0.9089,
"step": 145
},
{
"epoch": 0.92,
"grad_norm": 0.10732869076286236,
"learning_rate": 3.887021612769936e-06,
"loss": 0.9399,
"step": 150
},
{
"epoch": 0.95,
"grad_norm": 0.09376518103530347,
"learning_rate": 1.4779893243939359e-06,
"loss": 0.8965,
"step": 155
},
{
"epoch": 0.98,
"grad_norm": 0.09129164224191258,
"learning_rate": 2.082839134607828e-07,
"loss": 0.936,
"step": 160
},
{
"epoch": 1.0,
"eval_loss": 1.1432331800460815,
"eval_runtime": 200.5256,
"eval_samples_per_second": 11.52,
"eval_steps_per_second": 0.723,
"step": 163
},
{
"epoch": 1.0,
"step": 163,
"total_flos": 2037837427900416.0,
"train_loss": 0.36281629574079455,
"train_runtime": 1284.7614,
"train_samples_per_second": 16.222,
"train_steps_per_second": 0.127
}
],
"logging_steps": 5,
"max_steps": 163,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2037837427900416.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
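
This is the Hugging Face Trainer state dumped at the end of the run: `log_history` holds one entry per logging step (every 5 steps here), a final `eval_loss` entry at step 163, and a closing summary with `train_loss`, runtime, and FLOPs. Note that `loss` and `grad_norm` are recorded as 0.0 for the first ~100 steps and only become non-zero from step 105 onward. The snippet below is a minimal sketch (not part of the original repository) for loading and inspecting such a file; it assumes the JSON above is saved locally as "trainer_state.json", so adjust STATE_PATH to your checkout.

# inspect_trainer_state.py -- minimal sketch, assuming the file path below
import json
from pathlib import Path

STATE_PATH = Path("trainer_state.json")  # assumed location of the file shown above

with STATE_PATH.open() as f:
    state = json.load(f)

# Keep only the per-step training entries (those carrying a "loss" key);
# the eval entry only has "eval_loss" and the final summary only has "train_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]

for entry in train_logs:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  "
          f"lr {entry['learning_rate']:.3e}  loss {entry['loss']:.4f}  "
          f"grad_norm {entry['grad_norm']:.4f}")

# The last log_history entry aggregates the whole run.
summary = state["log_history"][-1]
print("final:", {k: summary[k] for k in ("train_loss", "train_runtime", "total_flos")})

Running this against the file above makes the anomaly easy to spot: every logged loss before step 105 prints as 0.0000, while later steps hover around 0.90-0.98, and the run-level train_loss of ~0.363 is the average over both regimes.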