sales-prediction / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 940,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10638297872340426,
"grad_norm": 13.33906364440918,
"learning_rate": 9.893617021276596e-05,
"loss": 0.2086,
"step": 10
},
{
"epoch": 0.2127659574468085,
"grad_norm": 4.733145713806152,
"learning_rate": 9.787234042553192e-05,
"loss": 0.1224,
"step": 20
},
{
"epoch": 0.3191489361702128,
"grad_norm": 17.717458724975586,
"learning_rate": 9.680851063829788e-05,
"loss": 0.124,
"step": 30
},
{
"epoch": 0.425531914893617,
"grad_norm": 1.6599293947219849,
"learning_rate": 9.574468085106384e-05,
"loss": 0.0323,
"step": 40
},
{
"epoch": 0.5319148936170213,
"grad_norm": 8.243000030517578,
"learning_rate": 9.468085106382978e-05,
"loss": 0.0454,
"step": 50
},
{
"epoch": 0.6382978723404256,
"grad_norm": 2.7700071334838867,
"learning_rate": 9.361702127659576e-05,
"loss": 0.0255,
"step": 60
},
{
"epoch": 0.7446808510638298,
"grad_norm": 7.994290828704834,
"learning_rate": 9.25531914893617e-05,
"loss": 0.0188,
"step": 70
},
{
"epoch": 0.851063829787234,
"grad_norm": 2.9809985160827637,
"learning_rate": 9.148936170212766e-05,
"loss": 0.0114,
"step": 80
},
{
"epoch": 0.9574468085106383,
"grad_norm": 5.259449005126953,
"learning_rate": 9.042553191489363e-05,
"loss": 0.0137,
"step": 90
},
{
"epoch": 1.0,
"eval_loss": 0.018044685944914818,
"eval_mse": 0.018044687807559967,
"eval_runtime": 14.1245,
"eval_samples_per_second": 13.239,
"eval_steps_per_second": 1.699,
"step": 94
},
{
"epoch": 1.0638297872340425,
"grad_norm": 6.504136562347412,
"learning_rate": 8.936170212765958e-05,
"loss": 0.0242,
"step": 100
},
{
"epoch": 1.1702127659574468,
"grad_norm": 0.9810653924942017,
"learning_rate": 8.829787234042553e-05,
"loss": 0.0411,
"step": 110
},
{
"epoch": 1.2765957446808511,
"grad_norm": 0.9685758352279663,
"learning_rate": 8.723404255319149e-05,
"loss": 0.0112,
"step": 120
},
{
"epoch": 1.3829787234042552,
"grad_norm": 3.027130603790283,
"learning_rate": 8.617021276595745e-05,
"loss": 0.0225,
"step": 130
},
{
"epoch": 1.4893617021276595,
"grad_norm": 0.406287282705307,
"learning_rate": 8.510638297872341e-05,
"loss": 0.0038,
"step": 140
},
{
"epoch": 1.5957446808510638,
"grad_norm": 3.338768482208252,
"learning_rate": 8.404255319148937e-05,
"loss": 0.0109,
"step": 150
},
{
"epoch": 1.702127659574468,
"grad_norm": 1.020302176475525,
"learning_rate": 8.297872340425533e-05,
"loss": 0.0078,
"step": 160
},
{
"epoch": 1.8085106382978724,
"grad_norm": 6.15859317779541,
"learning_rate": 8.191489361702128e-05,
"loss": 0.0213,
"step": 170
},
{
"epoch": 1.9148936170212765,
"grad_norm": 0.39595314860343933,
"learning_rate": 8.085106382978723e-05,
"loss": 0.0103,
"step": 180
},
{
"epoch": 2.0,
"eval_loss": 0.0077727301977574825,
"eval_mse": 0.0077727301977574825,
"eval_runtime": 13.6671,
"eval_samples_per_second": 13.682,
"eval_steps_per_second": 1.756,
"step": 188
},
{
"epoch": 2.021276595744681,
"grad_norm": 5.661103248596191,
"learning_rate": 7.978723404255319e-05,
"loss": 0.0267,
"step": 190
},
{
"epoch": 2.127659574468085,
"grad_norm": 4.488003730773926,
"learning_rate": 7.872340425531916e-05,
"loss": 0.0182,
"step": 200
},
{
"epoch": 2.2340425531914896,
"grad_norm": 1.4571934938430786,
"learning_rate": 7.76595744680851e-05,
"loss": 0.01,
"step": 210
},
{
"epoch": 2.3404255319148937,
"grad_norm": 4.472738742828369,
"learning_rate": 7.659574468085106e-05,
"loss": 0.0191,
"step": 220
},
{
"epoch": 2.4468085106382977,
"grad_norm": 1.574164867401123,
"learning_rate": 7.553191489361703e-05,
"loss": 0.0097,
"step": 230
},
{
"epoch": 2.5531914893617023,
"grad_norm": 1.6892690658569336,
"learning_rate": 7.446808510638298e-05,
"loss": 0.0098,
"step": 240
},
{
"epoch": 2.6595744680851063,
"grad_norm": 0.6197291612625122,
"learning_rate": 7.340425531914894e-05,
"loss": 0.0087,
"step": 250
},
{
"epoch": 2.7659574468085104,
"grad_norm": 1.9168953895568848,
"learning_rate": 7.23404255319149e-05,
"loss": 0.0219,
"step": 260
},
{
"epoch": 2.872340425531915,
"grad_norm": 1.0710517168045044,
"learning_rate": 7.127659574468085e-05,
"loss": 0.016,
"step": 270
},
{
"epoch": 2.978723404255319,
"grad_norm": 1.6727036237716675,
"learning_rate": 7.021276595744681e-05,
"loss": 0.0048,
"step": 280
},
{
"epoch": 3.0,
"eval_loss": 0.007307589519768953,
"eval_mse": 0.007307588588446379,
"eval_runtime": 13.6088,
"eval_samples_per_second": 13.741,
"eval_steps_per_second": 1.764,
"step": 282
},
{
"epoch": 3.0851063829787235,
"grad_norm": 2.2468886375427246,
"learning_rate": 6.914893617021277e-05,
"loss": 0.0169,
"step": 290
},
{
"epoch": 3.1914893617021276,
"grad_norm": 1.962132453918457,
"learning_rate": 6.808510638297873e-05,
"loss": 0.0092,
"step": 300
},
{
"epoch": 3.297872340425532,
"grad_norm": 0.9321272969245911,
"learning_rate": 6.702127659574469e-05,
"loss": 0.0089,
"step": 310
},
{
"epoch": 3.404255319148936,
"grad_norm": 1.55916428565979,
"learning_rate": 6.595744680851063e-05,
"loss": 0.005,
"step": 320
},
{
"epoch": 3.5106382978723403,
"grad_norm": 0.9688326716423035,
"learning_rate": 6.489361702127659e-05,
"loss": 0.0092,
"step": 330
},
{
"epoch": 3.617021276595745,
"grad_norm": 0.5779433250427246,
"learning_rate": 6.382978723404256e-05,
"loss": 0.012,
"step": 340
},
{
"epoch": 3.723404255319149,
"grad_norm": 4.831696510314941,
"learning_rate": 6.276595744680851e-05,
"loss": 0.011,
"step": 350
},
{
"epoch": 3.829787234042553,
"grad_norm": 0.6118608117103577,
"learning_rate": 6.170212765957447e-05,
"loss": 0.0165,
"step": 360
},
{
"epoch": 3.9361702127659575,
"grad_norm": 3.6182706356048584,
"learning_rate": 6.063829787234043e-05,
"loss": 0.0063,
"step": 370
},
{
"epoch": 4.0,
"eval_loss": 0.008724302053451538,
"eval_mse": 0.008724302053451538,
"eval_runtime": 13.5042,
"eval_samples_per_second": 13.848,
"eval_steps_per_second": 1.777,
"step": 376
},
{
"epoch": 4.042553191489362,
"grad_norm": 1.8335098028182983,
"learning_rate": 5.9574468085106384e-05,
"loss": 0.0062,
"step": 380
},
{
"epoch": 4.148936170212766,
"grad_norm": 0.48817679286003113,
"learning_rate": 5.851063829787234e-05,
"loss": 0.0041,
"step": 390
},
{
"epoch": 4.25531914893617,
"grad_norm": 2.2691452503204346,
"learning_rate": 5.744680851063831e-05,
"loss": 0.043,
"step": 400
},
{
"epoch": 4.361702127659575,
"grad_norm": 0.4736071527004242,
"learning_rate": 5.638297872340426e-05,
"loss": 0.0029,
"step": 410
},
{
"epoch": 4.468085106382979,
"grad_norm": 2.42499041557312,
"learning_rate": 5.531914893617022e-05,
"loss": 0.0042,
"step": 420
},
{
"epoch": 4.574468085106383,
"grad_norm": 2.1059014797210693,
"learning_rate": 5.425531914893617e-05,
"loss": 0.005,
"step": 430
},
{
"epoch": 4.680851063829787,
"grad_norm": 2.9217162132263184,
"learning_rate": 5.319148936170213e-05,
"loss": 0.0079,
"step": 440
},
{
"epoch": 4.787234042553192,
"grad_norm": 2.858285427093506,
"learning_rate": 5.212765957446809e-05,
"loss": 0.0071,
"step": 450
},
{
"epoch": 4.8936170212765955,
"grad_norm": 1.6557581424713135,
"learning_rate": 5.1063829787234044e-05,
"loss": 0.0033,
"step": 460
},
{
"epoch": 5.0,
"grad_norm": 1.1606419086456299,
"learning_rate": 5e-05,
"loss": 0.0027,
"step": 470
},
{
"epoch": 5.0,
"eval_loss": 0.009648770093917847,
"eval_mse": 0.009648771025240421,
"eval_runtime": 13.8278,
"eval_samples_per_second": 13.523,
"eval_steps_per_second": 1.736,
"step": 470
},
{
"epoch": 5.1063829787234045,
"grad_norm": 0.7212992310523987,
"learning_rate": 4.893617021276596e-05,
"loss": 0.0046,
"step": 480
},
{
"epoch": 5.212765957446808,
"grad_norm": 3.233711004257202,
"learning_rate": 4.787234042553192e-05,
"loss": 0.0097,
"step": 490
},
{
"epoch": 5.319148936170213,
"grad_norm": 0.9804595708847046,
"learning_rate": 4.680851063829788e-05,
"loss": 0.0051,
"step": 500
},
{
"epoch": 5.425531914893617,
"grad_norm": 1.4426085948944092,
"learning_rate": 4.574468085106383e-05,
"loss": 0.0043,
"step": 510
},
{
"epoch": 5.531914893617021,
"grad_norm": 0.7456052303314209,
"learning_rate": 4.468085106382979e-05,
"loss": 0.0028,
"step": 520
},
{
"epoch": 5.638297872340425,
"grad_norm": 0.5527386665344238,
"learning_rate": 4.3617021276595746e-05,
"loss": 0.0158,
"step": 530
},
{
"epoch": 5.74468085106383,
"grad_norm": 0.5531327724456787,
"learning_rate": 4.2553191489361704e-05,
"loss": 0.0015,
"step": 540
},
{
"epoch": 5.851063829787234,
"grad_norm": 1.6009923219680786,
"learning_rate": 4.148936170212766e-05,
"loss": 0.0092,
"step": 550
},
{
"epoch": 5.957446808510638,
"grad_norm": 2.4224541187286377,
"learning_rate": 4.0425531914893614e-05,
"loss": 0.0045,
"step": 560
},
{
"epoch": 6.0,
"eval_loss": 0.011005881242454052,
"eval_mse": 0.011005881242454052,
"eval_runtime": 13.5501,
"eval_samples_per_second": 13.801,
"eval_steps_per_second": 1.771,
"step": 564
},
{
"epoch": 6.0638297872340425,
"grad_norm": 0.6714192032814026,
"learning_rate": 3.936170212765958e-05,
"loss": 0.0043,
"step": 570
},
{
"epoch": 6.170212765957447,
"grad_norm": 1.744354009628296,
"learning_rate": 3.829787234042553e-05,
"loss": 0.0041,
"step": 580
},
{
"epoch": 6.276595744680851,
"grad_norm": 1.0487537384033203,
"learning_rate": 3.723404255319149e-05,
"loss": 0.0018,
"step": 590
},
{
"epoch": 6.382978723404255,
"grad_norm": 0.8574402928352356,
"learning_rate": 3.617021276595745e-05,
"loss": 0.0011,
"step": 600
},
{
"epoch": 6.48936170212766,
"grad_norm": 0.481860876083374,
"learning_rate": 3.5106382978723407e-05,
"loss": 0.0013,
"step": 610
},
{
"epoch": 6.595744680851064,
"grad_norm": 0.45897528529167175,
"learning_rate": 3.4042553191489365e-05,
"loss": 0.0012,
"step": 620
},
{
"epoch": 6.702127659574468,
"grad_norm": 0.9576705098152161,
"learning_rate": 3.2978723404255317e-05,
"loss": 0.0027,
"step": 630
},
{
"epoch": 6.808510638297872,
"grad_norm": 2.015152931213379,
"learning_rate": 3.191489361702128e-05,
"loss": 0.0027,
"step": 640
},
{
"epoch": 6.914893617021277,
"grad_norm": 0.8357438445091248,
"learning_rate": 3.085106382978723e-05,
"loss": 0.007,
"step": 650
},
{
"epoch": 7.0,
"eval_loss": 0.00820625014603138,
"eval_mse": 0.008206251077353954,
"eval_runtime": 13.4405,
"eval_samples_per_second": 13.913,
"eval_steps_per_second": 1.786,
"step": 658
},
{
"epoch": 7.0212765957446805,
"grad_norm": 0.8114696741104126,
"learning_rate": 2.9787234042553192e-05,
"loss": 0.0049,
"step": 660
},
{
"epoch": 7.127659574468085,
"grad_norm": 0.1766444891691208,
"learning_rate": 2.8723404255319154e-05,
"loss": 0.0016,
"step": 670
},
{
"epoch": 7.23404255319149,
"grad_norm": 0.25129997730255127,
"learning_rate": 2.765957446808511e-05,
"loss": 0.0009,
"step": 680
},
{
"epoch": 7.340425531914893,
"grad_norm": 0.1703944355249405,
"learning_rate": 2.6595744680851064e-05,
"loss": 0.0009,
"step": 690
},
{
"epoch": 7.446808510638298,
"grad_norm": 0.7361921072006226,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.0013,
"step": 700
},
{
"epoch": 7.553191489361702,
"grad_norm": 0.06729819625616074,
"learning_rate": 2.446808510638298e-05,
"loss": 0.0008,
"step": 710
},
{
"epoch": 7.659574468085106,
"grad_norm": 0.6885775327682495,
"learning_rate": 2.340425531914894e-05,
"loss": 0.0018,
"step": 720
},
{
"epoch": 7.76595744680851,
"grad_norm": 0.28569063544273376,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.0014,
"step": 730
},
{
"epoch": 7.872340425531915,
"grad_norm": 1.3078858852386475,
"learning_rate": 2.1276595744680852e-05,
"loss": 0.0014,
"step": 740
},
{
"epoch": 7.9787234042553195,
"grad_norm": 0.32889997959136963,
"learning_rate": 2.0212765957446807e-05,
"loss": 0.006,
"step": 750
},
{
"epoch": 8.0,
"eval_loss": 0.007531557697802782,
"eval_mse": 0.00753155630081892,
"eval_runtime": 13.5891,
"eval_samples_per_second": 13.761,
"eval_steps_per_second": 1.766,
"step": 752
},
{
"epoch": 8.085106382978724,
"grad_norm": 1.3261306285858154,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.0046,
"step": 760
},
{
"epoch": 8.191489361702128,
"grad_norm": 0.37409037351608276,
"learning_rate": 1.8085106382978724e-05,
"loss": 0.0106,
"step": 770
},
{
"epoch": 8.297872340425531,
"grad_norm": 1.879095196723938,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.0024,
"step": 780
},
{
"epoch": 8.404255319148936,
"grad_norm": 1.2030478715896606,
"learning_rate": 1.595744680851064e-05,
"loss": 0.0009,
"step": 790
},
{
"epoch": 8.51063829787234,
"grad_norm": 0.20317141711711884,
"learning_rate": 1.4893617021276596e-05,
"loss": 0.0049,
"step": 800
},
{
"epoch": 8.617021276595745,
"grad_norm": 0.6573496460914612,
"learning_rate": 1.3829787234042554e-05,
"loss": 0.0013,
"step": 810
},
{
"epoch": 8.72340425531915,
"grad_norm": 0.3387906551361084,
"learning_rate": 1.2765957446808511e-05,
"loss": 0.0005,
"step": 820
},
{
"epoch": 8.829787234042554,
"grad_norm": 0.10428553074598312,
"learning_rate": 1.170212765957447e-05,
"loss": 0.0009,
"step": 830
},
{
"epoch": 8.936170212765958,
"grad_norm": 0.5624672174453735,
"learning_rate": 1.0638297872340426e-05,
"loss": 0.001,
"step": 840
},
{
"epoch": 9.0,
"eval_loss": 0.007717251777648926,
"eval_mse": 0.0077172513119876385,
"eval_runtime": 13.5895,
"eval_samples_per_second": 13.761,
"eval_steps_per_second": 1.766,
"step": 846
},
{
"epoch": 9.042553191489361,
"grad_norm": 0.4665082097053528,
"learning_rate": 9.574468085106383e-06,
"loss": 0.0008,
"step": 850
},
{
"epoch": 9.148936170212766,
"grad_norm": 0.2608925998210907,
"learning_rate": 8.510638297872341e-06,
"loss": 0.0005,
"step": 860
},
{
"epoch": 9.25531914893617,
"grad_norm": 0.5921544432640076,
"learning_rate": 7.446808510638298e-06,
"loss": 0.0005,
"step": 870
},
{
"epoch": 9.361702127659575,
"grad_norm": 0.07455885410308838,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.0005,
"step": 880
},
{
"epoch": 9.46808510638298,
"grad_norm": 0.2597995400428772,
"learning_rate": 5.319148936170213e-06,
"loss": 0.0002,
"step": 890
},
{
"epoch": 9.574468085106384,
"grad_norm": 0.1356208473443985,
"learning_rate": 4.255319148936171e-06,
"loss": 0.0003,
"step": 900
},
{
"epoch": 9.680851063829786,
"grad_norm": 0.5684856176376343,
"learning_rate": 3.1914893617021277e-06,
"loss": 0.0016,
"step": 910
},
{
"epoch": 9.787234042553191,
"grad_norm": 0.3895494341850281,
"learning_rate": 2.1276595744680853e-06,
"loss": 0.0054,
"step": 920
},
{
"epoch": 9.893617021276595,
"grad_norm": 0.1405140459537506,
"learning_rate": 1.0638297872340427e-06,
"loss": 0.0003,
"step": 930
},
{
"epoch": 10.0,
"grad_norm": 0.5188348889350891,
"learning_rate": 0.0,
"loss": 0.0005,
"step": 940
}
],
"logging_steps": 10,
"max_steps": 940,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}