{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.98906439854192,
"global_step": 615,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 4.996738892723075e-05,
"loss": 3.1898,
"step": 10
},
{
"epoch": 0.1,
"learning_rate": 4.986964078748837e-05,
"loss": 3.0387,
"step": 20
},
{
"epoch": 0.15,
"learning_rate": 4.970701059450872e-05,
"loss": 3.1413,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 4.9479922631896405e-05,
"loss": 3.0543,
"step": 40
},
{
"epoch": 0.24,
"learning_rate": 4.918896934621734e-05,
"loss": 3.0544,
"step": 50
},
{
"epoch": 0.29,
"learning_rate": 4.8834909801373264e-05,
"loss": 2.9954,
"step": 60
},
{
"epoch": 0.34,
"learning_rate": 4.8418667698290696e-05,
"loss": 3.0829,
"step": 70
},
{
"epoch": 0.39,
"learning_rate": 4.79413289650907e-05,
"loss": 3.0368,
"step": 80
},
{
"epoch": 0.44,
"learning_rate": 4.740413892402639e-05,
"loss": 2.941,
"step": 90
},
{
"epoch": 0.49,
"learning_rate": 4.680849904257938e-05,
"loss": 2.9253,
"step": 100
},
{
"epoch": 0.53,
"learning_rate": 4.615596327719111e-05,
"loss": 2.9969,
"step": 110
},
{
"epoch": 0.58,
"learning_rate": 4.5448234019167945e-05,
"loss": 2.9547,
"step": 120
},
{
"epoch": 0.63,
"learning_rate": 4.468715765333664e-05,
"loss": 3.021,
"step": 130
},
{
"epoch": 0.68,
"learning_rate": 4.387471974103713e-05,
"loss": 2.96,
"step": 140
},
{
"epoch": 0.73,
"learning_rate": 4.301303984001967e-05,
"loss": 3.0919,
"step": 150
},
{
"epoch": 0.78,
"learning_rate": 4.210436597476076e-05,
"loss": 2.9032,
"step": 160
},
{
"epoch": 0.83,
"learning_rate": 4.1151068771623866e-05,
"loss": 2.9535,
"step": 170
},
{
"epoch": 0.87,
"learning_rate": 4.015563527416595e-05,
"loss": 2.9935,
"step": 180
},
{
"epoch": 0.92,
"learning_rate": 3.9120662454724836e-05,
"loss": 3.0283,
"step": 190
},
{
"epoch": 0.97,
"learning_rate": 3.8048850439214844e-05,
"loss": 2.9129,
"step": 200
},
{
"epoch": 1.02,
"learning_rate": 3.694299546280657e-05,
"loss": 2.9757,
"step": 210
},
{
"epoch": 1.07,
"learning_rate": 3.580598257486867e-05,
"loss": 2.9132,
"step": 220
},
{
"epoch": 1.12,
"learning_rate": 3.46407781122034e-05,
"loss": 2.8876,
"step": 230
},
{
"epoch": 1.17,
"learning_rate": 3.3450421960212566e-05,
"loss": 2.9416,
"step": 240
},
{
"epoch": 1.22,
"learning_rate": 3.223801962218372e-05,
"loss": 2.994,
"step": 250
},
{
"epoch": 1.26,
"learning_rate": 3.100673411738652e-05,
"loss": 2.9024,
"step": 260
},
{
"epoch": 1.31,
"learning_rate": 2.975977772911671e-05,
"loss": 2.9789,
"step": 270
},
{
"epoch": 1.36,
"learning_rate": 2.8500403624215734e-05,
"loss": 2.9478,
"step": 280
},
{
"epoch": 1.41,
"learning_rate": 2.723189736592986e-05,
"loss": 2.8663,
"step": 290
},
{
"epoch": 1.46,
"learning_rate": 2.595756834225089e-05,
"loss": 3.0051,
"step": 300
},
{
"epoch": 1.51,
"learning_rate": 2.468074113210066e-05,
"loss": 2.8488,
"step": 310
},
{
"epoch": 1.56,
"learning_rate": 2.340474683188429e-05,
"loss": 2.9825,
"step": 320
},
{
"epoch": 1.6,
"learning_rate": 2.2132914365039993e-05,
"loss": 3.0427,
"step": 330
},
{
"epoch": 1.65,
"learning_rate": 2.0868561797257878e-05,
"loss": 3.0026,
"step": 340
},
{
"epoch": 1.7,
"learning_rate": 1.961498768002547e-05,
"loss": 2.984,
"step": 350
},
{
"epoch": 1.75,
"learning_rate": 1.8375462445083464e-05,
"loss": 2.9845,
"step": 360
},
{
"epoch": 1.8,
"learning_rate": 1.7153219872242727e-05,
"loss": 2.9139,
"step": 370
},
{
"epoch": 1.85,
"learning_rate": 1.5951448652822047e-05,
"loss": 3.0234,
"step": 380
},
{
"epoch": 1.9,
"learning_rate": 1.4773284070716503e-05,
"loss": 2.9504,
"step": 390
},
{
"epoch": 1.94,
"learning_rate": 1.3621799822799788e-05,
"loss": 2.9035,
"step": 400
},
{
"epoch": 1.99,
"learning_rate": 1.2500000000000006e-05,
"loss": 2.9339,
"step": 410
},
{
"epoch": 2.04,
"learning_rate": 1.1410811249969475e-05,
"loss": 2.9613,
"step": 420
},
{
"epoch": 2.09,
"learning_rate": 1.035707514179513e-05,
"loss": 2.9762,
"step": 430
},
{
"epoch": 2.14,
"learning_rate": 9.341540752669235e-06,
"loss": 2.8895,
"step": 440
},
{
"epoch": 2.19,
"learning_rate": 8.36685749586087e-06,
"loss": 2.9219,
"step": 450
},
{
"epoch": 2.24,
"learning_rate": 7.435568208699203e-06,
"loss": 2.8863,
"step": 460
},
{
"epoch": 2.28,
"learning_rate": 6.55010251860127e-06,
"loss": 2.8696,
"step": 470
},
{
"epoch": 2.33,
"learning_rate": 5.712770504451426e-06,
"loss": 2.8808,
"step": 480
},
{
"epoch": 2.38,
"learning_rate": 4.925756669869314e-06,
"loss": 2.9685,
"step": 490
},
{
"epoch": 2.43,
"learning_rate": 4.19111424408932e-06,
"loss": 2.8873,
"step": 500
},
{
"epoch": 2.48,
"learning_rate": 3.5107598253199758e-06,
"loss": 2.9854,
"step": 510
},
{
"epoch": 2.53,
"learning_rate": 2.8864683805580133e-06,
"loss": 2.9411,
"step": 520
},
{
"epoch": 2.58,
"learning_rate": 2.3198686149022013e-06,
"loss": 2.9353,
"step": 530
},
{
"epoch": 2.62,
"learning_rate": 1.8124387224476347e-06,
"loss": 2.9348,
"step": 540
},
{
"epoch": 2.67,
"learning_rate": 1.365502529846166e-06,
"loss": 2.8754,
"step": 550
},
{
"epoch": 2.72,
"learning_rate": 9.802260425938099e-07,
"loss": 2.925,
"step": 560
},
{
"epoch": 2.77,
"learning_rate": 6.576144030555259e-07,
"loss": 2.976,
"step": 570
},
{
"epoch": 2.82,
"learning_rate": 3.9850926816357157e-07,
"loss": 2.913,
"step": 580
},
{
"epoch": 2.87,
"learning_rate": 2.0358661363065746e-07,
"loss": 2.9485,
"step": 590
},
{
"epoch": 2.92,
"learning_rate": 7.335497040648898e-08,
"loss": 2.946,
"step": 600
},
{
"epoch": 2.96,
"learning_rate": 8.154097978591014e-09,
"loss": 2.982,
"step": 610
},
{
"epoch": 2.99,
"step": 615,
"total_flos": 7.322328732745728e+16,
"train_loss": 2.964561536835461,
"train_runtime": 1710.3688,
"train_samples_per_second": 5.771,
"train_steps_per_second": 0.36
}
],
"max_steps": 615,
"num_train_epochs": 3,
"total_flos": 7.322328732745728e+16,
"trial_name": null,
"trial_params": null
}