Llama3-8B-PromptInjectionHardened / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 0,
"global_step": 33,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09090909090909091,
"grad_norm": 102.1269760131836,
"learning_rate": 9.696969696969698e-06,
"loss": 1.3339,
"step": 1
},
{
"epoch": 0.18181818181818182,
"grad_norm": 32.059940338134766,
"learning_rate": 9.393939393939396e-06,
"loss": 0.8949,
"step": 2
},
{
"epoch": 0.2727272727272727,
"grad_norm": 24.139785766601562,
"learning_rate": 9.090909090909091e-06,
"loss": 0.8498,
"step": 3
},
{
"epoch": 0.36363636363636365,
"grad_norm": 4.718374729156494,
"learning_rate": 8.787878787878788e-06,
"loss": 0.6128,
"step": 4
},
{
"epoch": 0.45454545454545453,
"grad_norm": 4.824295997619629,
"learning_rate": 8.484848484848486e-06,
"loss": 0.4623,
"step": 5
},
{
"epoch": 0.5454545454545454,
"grad_norm": 1.600407361984253,
"learning_rate": 8.181818181818183e-06,
"loss": 0.3631,
"step": 6
},
{
"epoch": 0.6363636363636364,
"grad_norm": 2.3101181983947754,
"learning_rate": 7.87878787878788e-06,
"loss": 0.2783,
"step": 7
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.6703295707702637,
"learning_rate": 7.5757575757575764e-06,
"loss": 0.2201,
"step": 8
},
{
"epoch": 0.8181818181818182,
"grad_norm": 1.6509493589401245,
"learning_rate": 7.272727272727273e-06,
"loss": 0.1772,
"step": 9
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.1594750881195068,
"learning_rate": 6.969696969696971e-06,
"loss": 0.15,
"step": 10
},
{
"epoch": 1.0,
"grad_norm": 1.1056914329528809,
"learning_rate": 6.666666666666667e-06,
"loss": 0.1294,
"step": 11
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.7135591506958008,
"learning_rate": 6.363636363636364e-06,
"loss": 0.1173,
"step": 12
},
{
"epoch": 1.1818181818181819,
"grad_norm": 0.8880245089530945,
"learning_rate": 6.060606060606061e-06,
"loss": 0.1027,
"step": 13
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.7149422764778137,
"learning_rate": 5.7575757575757586e-06,
"loss": 0.0999,
"step": 14
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.4583257734775543,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.0929,
"step": 15
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.4339885115623474,
"learning_rate": 5.151515151515152e-06,
"loss": 0.0897,
"step": 16
},
{
"epoch": 1.5454545454545454,
"grad_norm": 0.418163001537323,
"learning_rate": 4.848484848484849e-06,
"loss": 0.0882,
"step": 17
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.3970937728881836,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.0862,
"step": 18
},
{
"epoch": 1.7272727272727273,
"grad_norm": 0.300276517868042,
"learning_rate": 4.242424242424243e-06,
"loss": 0.0838,
"step": 19
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.3120823800563812,
"learning_rate": 3.93939393939394e-06,
"loss": 0.0842,
"step": 20
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.3221524953842163,
"learning_rate": 3.6363636363636366e-06,
"loss": 0.0843,
"step": 21
},
{
"epoch": 2.0,
"grad_norm": 0.2994186580181122,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0869,
"step": 22
},
{
"epoch": 2.090909090909091,
"grad_norm": 0.2431754171848297,
"learning_rate": 3.0303030303030305e-06,
"loss": 0.08,
"step": 23
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.3260601758956909,
"learning_rate": 2.7272727272727272e-06,
"loss": 0.0792,
"step": 24
},
{
"epoch": 2.2727272727272725,
"grad_norm": 0.25717467069625854,
"learning_rate": 2.4242424242424244e-06,
"loss": 0.0788,
"step": 25
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.2379615604877472,
"learning_rate": 2.1212121212121216e-06,
"loss": 0.0792,
"step": 26
},
{
"epoch": 2.4545454545454546,
"grad_norm": 0.22090616822242737,
"learning_rate": 1.8181818181818183e-06,
"loss": 0.077,
"step": 27
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.2328050285577774,
"learning_rate": 1.5151515151515152e-06,
"loss": 0.0801,
"step": 28
},
{
"epoch": 2.6363636363636362,
"grad_norm": 0.24620036780834198,
"learning_rate": 1.2121212121212122e-06,
"loss": 0.0815,
"step": 29
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.2227202206850052,
"learning_rate": 9.090909090909091e-07,
"loss": 0.0753,
"step": 30
},
{
"epoch": 2.8181818181818183,
"grad_norm": 0.1897324025630951,
"learning_rate": 6.060606060606061e-07,
"loss": 0.078,
"step": 31
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.20983684062957764,
"learning_rate": 3.0303030303030305e-07,
"loss": 0.0771,
"step": 32
},
{
"epoch": 3.0,
"grad_norm": 0.22096428275108337,
"learning_rate": 0.0,
"loss": 0.0774,
"step": 33
}
],
"logging_steps": 1.0,
"max_steps": 33,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.651932147574374e+16,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}
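A minimal sketch of how this state file could be inspected, assuming it is saved locally as trainer_state.json and that matplotlib is available; it only reads the "log_history", "step", and "loss" fields shown above.

```python
import json

import matplotlib.pyplot as plt

# Read the Trainer state (assumed saved as trainer_state.json in the
# working directory) and collect the per-step training loss entries.
with open("trainer_state.json") as f:
    state = json.load(f)

steps = [entry["step"] for entry in state["log_history"] if "loss" in entry]
losses = [entry["loss"] for entry in state["log_history"] if "loss" in entry]

# Plot loss against global step to visualize the training curve.
plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("Loss curve from trainer_state.json")
plt.show()
```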