{
"best_metric": 0.45234861969947815,
"best_model_checkpoint": "qlora-extractor-checkpoints/checkpoint-598",
"epoch": 2.392,
"eval_steps": 299,
"global_step": 598,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 0.10648680478334427,
"learning_rate": 0.00019786096256684492,
"loss": 1.2225,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 0.0971209928393364,
"learning_rate": 0.00019518716577540107,
"loss": 0.9559,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 0.120168536901474,
"learning_rate": 0.00019251336898395722,
"loss": 0.7891,
"step": 30
},
{
"epoch": 0.16,
"grad_norm": 0.13561870157718658,
"learning_rate": 0.0001898395721925134,
"loss": 0.6716,
"step": 40
},
{
"epoch": 0.2,
"grad_norm": 0.13140155375003815,
"learning_rate": 0.0001871657754010695,
"loss": 0.5196,
"step": 50
},
{
"epoch": 0.24,
"grad_norm": 0.12888653576374054,
"learning_rate": 0.0001844919786096257,
"loss": 0.8715,
"step": 60
},
{
"epoch": 0.28,
"grad_norm": 0.17288090288639069,
"learning_rate": 0.00018181818181818183,
"loss": 0.7102,
"step": 70
},
{
"epoch": 0.32,
"grad_norm": 0.19013544917106628,
"learning_rate": 0.00017914438502673798,
"loss": 0.6303,
"step": 80
},
{
"epoch": 0.36,
"grad_norm": 0.16771665215492249,
"learning_rate": 0.00017647058823529413,
"loss": 0.5402,
"step": 90
},
{
"epoch": 0.4,
"grad_norm": 0.12465972453355789,
"learning_rate": 0.00017379679144385028,
"loss": 0.4714,
"step": 100
},
{
"epoch": 0.44,
"grad_norm": 0.1407775729894638,
"learning_rate": 0.00017112299465240642,
"loss": 0.7708,
"step": 110
},
{
"epoch": 0.48,
"grad_norm": 0.1658485233783722,
"learning_rate": 0.00016844919786096257,
"loss": 0.5489,
"step": 120
},
{
"epoch": 0.52,
"grad_norm": 0.18059569597244263,
"learning_rate": 0.00016577540106951872,
"loss": 0.6152,
"step": 130
},
{
"epoch": 0.56,
"grad_norm": 0.16564571857452393,
"learning_rate": 0.0001631016042780749,
"loss": 0.5372,
"step": 140
},
{
"epoch": 0.6,
"grad_norm": 0.15012599527835846,
"learning_rate": 0.00016042780748663101,
"loss": 0.4824,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.18662799894809723,
"learning_rate": 0.0001577540106951872,
"loss": 0.7024,
"step": 160
},
{
"epoch": 0.68,
"grad_norm": 0.26714515686035156,
"learning_rate": 0.0001550802139037433,
"loss": 0.6077,
"step": 170
},
{
"epoch": 0.72,
"grad_norm": 0.14682364463806152,
"learning_rate": 0.00015240641711229948,
"loss": 0.5559,
"step": 180
},
{
"epoch": 0.76,
"grad_norm": 0.20032133162021637,
"learning_rate": 0.00014973262032085563,
"loss": 0.5333,
"step": 190
},
{
"epoch": 0.8,
"grad_norm": 0.16041621565818787,
"learning_rate": 0.00014705882352941178,
"loss": 0.4873,
"step": 200
},
{
"epoch": 0.84,
"grad_norm": 0.15586546063423157,
"learning_rate": 0.00014438502673796793,
"loss": 0.6419,
"step": 210
},
{
"epoch": 0.88,
"grad_norm": 0.1745934784412384,
"learning_rate": 0.00014171122994652405,
"loss": 0.5824,
"step": 220
},
{
"epoch": 0.92,
"grad_norm": 0.15679331123828888,
"learning_rate": 0.00013903743315508022,
"loss": 0.5084,
"step": 230
},
{
"epoch": 0.96,
"grad_norm": 0.16343417763710022,
"learning_rate": 0.00013636363636363637,
"loss": 0.481,
"step": 240
},
{
"epoch": 1.0,
"grad_norm": 0.14305788278579712,
"learning_rate": 0.00013368983957219252,
"loss": 0.4472,
"step": 250
},
{
"epoch": 1.04,
"grad_norm": 0.20606954395771027,
"learning_rate": 0.00013101604278074866,
"loss": 0.5238,
"step": 260
},
{
"epoch": 1.08,
"grad_norm": 0.1933826059103012,
"learning_rate": 0.0001283422459893048,
"loss": 0.4699,
"step": 270
},
{
"epoch": 1.12,
"grad_norm": 0.1788238286972046,
"learning_rate": 0.00012566844919786096,
"loss": 0.4909,
"step": 280
},
{
"epoch": 1.16,
"grad_norm": 0.2144651561975479,
"learning_rate": 0.00012299465240641713,
"loss": 0.387,
"step": 290
},
{
"epoch": 1.2,
"grad_norm": 0.18218065798282623,
"learning_rate": 0.00012005347593582887,
"loss": 0.319,
"step": 300
},
{
"epoch": 1.24,
"grad_norm": 0.14094972610473633,
"learning_rate": 0.00011737967914438503,
"loss": 0.6182,
"step": 310
},
{
"epoch": 1.28,
"grad_norm": 0.20308974385261536,
"learning_rate": 0.00011470588235294118,
"loss": 0.4492,
"step": 320
},
{
"epoch": 1.32,
"grad_norm": 0.18815161287784576,
"learning_rate": 0.00011203208556149734,
"loss": 0.4472,
"step": 330
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.1986108422279358,
"learning_rate": 0.00010935828877005347,
"loss": 0.3996,
"step": 340
},
{
"epoch": 1.4,
"grad_norm": 0.1628069281578064,
"learning_rate": 0.00010668449197860964,
"loss": 0.3645,
"step": 350
},
{
"epoch": 1.44,
"grad_norm": 0.14933669567108154,
"learning_rate": 0.00010401069518716577,
"loss": 0.5977,
"step": 360
},
{
"epoch": 1.48,
"grad_norm": 0.18734735250473022,
"learning_rate": 0.00010133689839572193,
"loss": 0.3784,
"step": 370
},
{
"epoch": 1.52,
"grad_norm": 0.2121153324842453,
"learning_rate": 9.866310160427808e-05,
"loss": 0.4581,
"step": 380
},
{
"epoch": 1.56,
"grad_norm": 0.19748178124427795,
"learning_rate": 9.598930481283423e-05,
"loss": 0.3858,
"step": 390
},
{
"epoch": 1.6,
"grad_norm": 0.18506589531898499,
"learning_rate": 9.331550802139037e-05,
"loss": 0.3706,
"step": 400
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.15329745411872864,
"learning_rate": 9.064171122994652e-05,
"loss": 0.5347,
"step": 410
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.19260501861572266,
"learning_rate": 8.796791443850267e-05,
"loss": 0.4812,
"step": 420
},
{
"epoch": 1.72,
"grad_norm": 0.1763940006494522,
"learning_rate": 8.529411764705883e-05,
"loss": 0.4317,
"step": 430
},
{
"epoch": 1.76,
"grad_norm": 0.20417028665542603,
"learning_rate": 8.262032085561498e-05,
"loss": 0.4145,
"step": 440
},
{
"epoch": 1.8,
"grad_norm": 0.2001723051071167,
"learning_rate": 7.994652406417112e-05,
"loss": 0.3884,
"step": 450
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.2101861983537674,
"learning_rate": 7.727272727272727e-05,
"loss": 0.4758,
"step": 460
},
{
"epoch": 1.88,
"grad_norm": 0.2131994664669037,
"learning_rate": 7.459893048128342e-05,
"loss": 0.466,
"step": 470
},
{
"epoch": 1.92,
"grad_norm": 0.21261604130268097,
"learning_rate": 7.192513368983958e-05,
"loss": 0.4088,
"step": 480
},
{
"epoch": 1.96,
"grad_norm": 0.21039697527885437,
"learning_rate": 6.925133689839573e-05,
"loss": 0.3858,
"step": 490
},
{
"epoch": 2.0,
"grad_norm": 0.18674618005752563,
"learning_rate": 6.657754010695188e-05,
"loss": 0.3672,
"step": 500
},
{
"epoch": 2.04,
"grad_norm": 0.1681678146123886,
"learning_rate": 6.390374331550802e-05,
"loss": 0.5486,
"step": 510
},
{
"epoch": 2.08,
"grad_norm": 0.26280567049980164,
"learning_rate": 6.122994652406417e-05,
"loss": 0.3358,
"step": 520
},
{
"epoch": 2.12,
"grad_norm": 0.24697232246398926,
"learning_rate": 5.8556149732620325e-05,
"loss": 0.3871,
"step": 530
},
{
"epoch": 2.16,
"grad_norm": 0.22857435047626495,
"learning_rate": 5.588235294117647e-05,
"loss": 0.3822,
"step": 540
},
{
"epoch": 2.2,
"grad_norm": 0.2402905523777008,
"learning_rate": 5.320855614973263e-05,
"loss": 0.3009,
"step": 550
},
{
"epoch": 2.24,
"grad_norm": 0.19268670678138733,
"learning_rate": 5.0534759358288774e-05,
"loss": 0.3916,
"step": 560
},
{
"epoch": 2.2800000000000002,
"grad_norm": 0.2288196086883545,
"learning_rate": 4.786096256684492e-05,
"loss": 0.338,
"step": 570
},
{
"epoch": 2.32,
"grad_norm": 0.2444332093000412,
"learning_rate": 4.518716577540107e-05,
"loss": 0.3971,
"step": 580
},
{
"epoch": 2.36,
"grad_norm": 0.26502081751823425,
"learning_rate": 4.251336898395722e-05,
"loss": 0.2905,
"step": 590
},
{
"epoch": 2.392,
"eval_loss": 0.45234861969947815,
"eval_runtime": 967.7709,
"eval_samples_per_second": 0.207,
"eval_steps_per_second": 0.207,
"step": 598
}
],
"logging_steps": 10,
"max_steps": 750,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 299,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.783168801781924e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}