{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7467248908296944,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": 0.6939600110054016,
"learning_rate": 4.9894597202472696e-05,
"loss": 2.9255,
"step": 5
},
{
"epoch": 0.17,
"grad_norm": 0.8492265343666077,
"learning_rate": 4.957927758986888e-05,
"loss": 2.8178,
"step": 10
},
{
"epoch": 0.26,
"grad_norm": 0.8294272422790527,
"learning_rate": 4.905670000773126e-05,
"loss": 2.7191,
"step": 15
},
{
"epoch": 0.35,
"grad_norm": 0.6167703866958618,
"learning_rate": 4.833127094718643e-05,
"loss": 2.6713,
"step": 20
},
{
"epoch": 0.44,
"grad_norm": 0.49007371068000793,
"learning_rate": 4.7409107388425504e-05,
"loss": 2.6395,
"step": 25
},
{
"epoch": 0.52,
"grad_norm": 0.4197269678115845,
"learning_rate": 4.629798522095818e-05,
"loss": 2.5998,
"step": 30
},
{
"epoch": 0.61,
"grad_norm": 0.35962584614753723,
"learning_rate": 4.5007273675572104e-05,
"loss": 2.5696,
"step": 35
},
{
"epoch": 0.7,
"grad_norm": 0.31397658586502075,
"learning_rate": 4.3547856320882044e-05,
"loss": 2.5568,
"step": 40
},
{
"epoch": 0.79,
"grad_norm": 0.2711264193058014,
"learning_rate": 4.193203929064353e-05,
"loss": 2.4964,
"step": 45
},
{
"epoch": 0.87,
"grad_norm": 0.29537662863731384,
"learning_rate": 4.0173447515678916e-05,
"loss": 2.5512,
"step": 50
},
{
"epoch": 0.96,
"grad_norm": 0.28811562061309814,
"learning_rate": 3.82869098354114e-05,
"loss": 2.5155,
"step": 55
},
{
"epoch": 1.05,
"grad_norm": 0.2557935118675232,
"learning_rate": 3.628833395777224e-05,
"loss": 2.5253,
"step": 60
},
{
"epoch": 1.14,
"grad_norm": 0.243013396859169,
"learning_rate": 3.4194572321847336e-05,
"loss": 2.5395,
"step": 65
},
{
"epoch": 1.22,
"grad_norm": 0.26634490489959717,
"learning_rate": 3.202327999433924e-05,
"loss": 2.4728,
"step": 70
},
{
"epoch": 1.31,
"grad_norm": 0.288352906703949,
"learning_rate": 2.9792765798093465e-05,
"loss": 2.5059,
"step": 75
},
{
"epoch": 1.4,
"grad_norm": 0.25032252073287964,
"learning_rate": 2.752183792800671e-05,
"loss": 2.5368,
"step": 80
},
{
"epoch": 1.48,
"grad_norm": 0.24827222526073456,
"learning_rate": 2.5229645356118163e-05,
"loss": 2.4811,
"step": 85
},
{
"epoch": 1.57,
"grad_norm": 0.2552015781402588,
"learning_rate": 2.2935516363191693e-05,
"loss": 2.5105,
"step": 90
},
{
"epoch": 1.66,
"grad_norm": 0.2699149549007416,
"learning_rate": 2.0658795558326743e-05,
"loss": 2.5252,
"step": 95
},
{
"epoch": 1.75,
"grad_norm": 0.2718549966812134,
"learning_rate": 1.8418680760885027e-05,
"loss": 2.4448,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 171,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 1.5003963409563648e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}