{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"grad_norm": 4.709561347961426,
"learning_rate": 4.994647308096509e-05,
"loss": 8.9503,
"step": 10
},
{
"epoch": 0.12,
"grad_norm": 2.5953965187072754,
"learning_rate": 4.9786121534345265e-05,
"loss": 8.0851,
"step": 20
},
{
"epoch": 0.19,
"grad_norm": 2.367886543273926,
"learning_rate": 4.951963201008076e-05,
"loss": 7.5574,
"step": 30
},
{
"epoch": 0.25,
"grad_norm": 3.426809549331665,
"learning_rate": 4.914814565722671e-05,
"loss": 6.9461,
"step": 40
},
{
"epoch": 0.31,
"grad_norm": 4.523465633392334,
"learning_rate": 4.867325323737765e-05,
"loss": 6.1526,
"step": 50
},
{
"epoch": 0.38,
"grad_norm": 1.7781277894973755,
"learning_rate": 4.8096988312782174e-05,
"loss": 5.4116,
"step": 60
},
{
"epoch": 0.44,
"grad_norm": 1.3401896953582764,
"learning_rate": 4.742181853831721e-05,
"loss": 5.0721,
"step": 70
},
{
"epoch": 0.5,
"grad_norm": 0.9671522378921509,
"learning_rate": 4.665063509461097e-05,
"loss": 4.7695,
"step": 80
},
{
"epoch": 0.56,
"grad_norm": 1.055595874786377,
"learning_rate": 4.5786740307563636e-05,
"loss": 4.6179,
"step": 90
},
{
"epoch": 0.62,
"grad_norm": 0.7612900733947754,
"learning_rate": 4.4833833507280884e-05,
"loss": 4.4848,
"step": 100
},
{
"epoch": 0.69,
"grad_norm": 0.7046266198158264,
"learning_rate": 4.379599518697444e-05,
"loss": 4.3627,
"step": 110
},
{
"epoch": 0.75,
"grad_norm": 0.9548586010932922,
"learning_rate": 4.267766952966369e-05,
"loss": 4.3319,
"step": 120
},
{
"epoch": 0.81,
"grad_norm": 0.6869709491729736,
"learning_rate": 4.148364537750172e-05,
"loss": 4.269,
"step": 130
},
{
"epoch": 0.88,
"grad_norm": 0.6282512545585632,
"learning_rate": 4.021903572521802e-05,
"loss": 4.2557,
"step": 140
},
{
"epoch": 0.94,
"grad_norm": 0.5287710428237915,
"learning_rate": 3.888925582549006e-05,
"loss": 4.1534,
"step": 150
},
{
"epoch": 1.0,
"grad_norm": 0.7542024850845337,
"learning_rate": 3.7500000000000003e-05,
"loss": 4.2017,
"step": 160
},
{
"epoch": 1.06,
"grad_norm": 0.49876415729522705,
"learning_rate": 3.6057217255475034e-05,
"loss": 4.1732,
"step": 170
},
{
"epoch": 1.12,
"grad_norm": 0.5626935958862305,
"learning_rate": 3.456708580912725e-05,
"loss": 4.1358,
"step": 180
},
{
"epoch": 1.19,
"grad_norm": 0.493310809135437,
"learning_rate": 3.303598663257904e-05,
"loss": 4.1514,
"step": 190
},
{
"epoch": 1.25,
"grad_norm": 0.4654150605201721,
"learning_rate": 3.147047612756302e-05,
"loss": 4.0925,
"step": 200
},
{
"epoch": 1.31,
"grad_norm": 0.592623770236969,
"learning_rate": 2.9877258050403212e-05,
"loss": 4.0496,
"step": 210
},
{
"epoch": 1.38,
"grad_norm": 0.5564578771591187,
"learning_rate": 2.8263154805501297e-05,
"loss": 4.0853,
"step": 220
},
{
"epoch": 1.44,
"grad_norm": 0.6952773332595825,
"learning_rate": 2.663507823075358e-05,
"loss": 4.1283,
"step": 230
},
{
"epoch": 1.5,
"grad_norm": 0.5385617613792419,
"learning_rate": 2.5e-05,
"loss": 4.0567,
"step": 240
},
{
"epoch": 1.56,
"grad_norm": 0.5663427114486694,
"learning_rate": 2.3364921769246423e-05,
"loss": 4.0141,
"step": 250
},
{
"epoch": 1.62,
"grad_norm": 0.5520788431167603,
"learning_rate": 2.173684519449872e-05,
"loss": 4.0356,
"step": 260
},
{
"epoch": 1.69,
"grad_norm": 0.5162128806114197,
"learning_rate": 2.0122741949596797e-05,
"loss": 4.082,
"step": 270
},
{
"epoch": 1.75,
"grad_norm": 0.5291630625724792,
"learning_rate": 1.852952387243698e-05,
"loss": 4.0767,
"step": 280
},
{
"epoch": 1.81,
"grad_norm": 0.6226648092269897,
"learning_rate": 1.6964013367420966e-05,
"loss": 3.9817,
"step": 290
},
{
"epoch": 1.88,
"grad_norm": 0.5460664629936218,
"learning_rate": 1.5432914190872757e-05,
"loss": 4.0618,
"step": 300
},
{
"epoch": 1.94,
"grad_norm": 0.7545162439346313,
"learning_rate": 1.3942782744524973e-05,
"loss": 4.0106,
"step": 310
},
{
"epoch": 2.0,
"grad_norm": 0.6207989454269409,
"learning_rate": 1.2500000000000006e-05,
"loss": 4.0549,
"step": 320
},
{
"epoch": 2.06,
"grad_norm": 0.5338532328605652,
"learning_rate": 1.1110744174509952e-05,
"loss": 3.919,
"step": 330
},
{
"epoch": 2.12,
"grad_norm": 0.5484297275543213,
"learning_rate": 9.780964274781984e-06,
"loss": 4.053,
"step": 340
},
{
"epoch": 2.19,
"grad_norm": 0.6356564164161682,
"learning_rate": 8.51635462249828e-06,
"loss": 4.0032,
"step": 350
},
{
"epoch": 2.25,
"grad_norm": 0.5518457889556885,
"learning_rate": 7.3223304703363135e-06,
"loss": 3.991,
"step": 360
},
{
"epoch": 2.31,
"grad_norm": 0.5176472067832947,
"learning_rate": 6.204004813025568e-06,
"loss": 3.9941,
"step": 370
},
{
"epoch": 2.38,
"grad_norm": 0.5543831586837769,
"learning_rate": 5.166166492719124e-06,
"loss": 4.0388,
"step": 380
},
{
"epoch": 2.44,
"grad_norm": 0.5504453182220459,
"learning_rate": 4.213259692436367e-06,
"loss": 4.0767,
"step": 390
},
{
"epoch": 2.5,
"grad_norm": 0.5619158148765564,
"learning_rate": 3.3493649053890326e-06,
"loss": 3.9709,
"step": 400
},
{
"epoch": 2.56,
"grad_norm": 0.5513697266578674,
"learning_rate": 2.578181461682794e-06,
"loss": 3.9714,
"step": 410
},
{
"epoch": 2.62,
"grad_norm": 0.590857744216919,
"learning_rate": 1.9030116872178316e-06,
"loss": 4.0366,
"step": 420
},
{
"epoch": 2.69,
"grad_norm": 0.5728959441184998,
"learning_rate": 1.3267467626223606e-06,
"loss": 3.9307,
"step": 430
},
{
"epoch": 2.75,
"grad_norm": 0.5163474082946777,
"learning_rate": 8.51854342773295e-07,
"loss": 4.0751,
"step": 440
},
{
"epoch": 2.81,
"grad_norm": 0.5247732996940613,
"learning_rate": 4.803679899192392e-07,
"loss": 4.0141,
"step": 450
},
{
"epoch": 2.88,
"grad_norm": 0.5492649674415588,
"learning_rate": 2.1387846565474045e-07,
"loss": 3.9676,
"step": 460
},
{
"epoch": 2.94,
"grad_norm": 0.5566267371177673,
"learning_rate": 5.352691903491303e-08,
"loss": 4.0041,
"step": 470
},
{
"epoch": 3.0,
"grad_norm": 0.6199953556060791,
"learning_rate": 0.0,
"loss": 3.909,
"step": 480
},
{
"epoch": 3.0,
"step": 480,
"total_flos": 3.260181978788659e+17,
"train_loss": 4.515984590848287,
"train_runtime": 5513.8168,
"train_samples_per_second": 0.696,
"train_steps_per_second": 0.087
}
],
"logging_steps": 10,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"total_flos": 3.260181978788659e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}