{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03571428571428571,
"grad_norm": 19.5,
"learning_rate": 1.5838110992819075e-05,
"loss": 4.4082,
"step": 5
},
{
"epoch": 0.07142857142857142,
"grad_norm": 11.6875,
"learning_rate": 3.5635749733842914e-05,
"loss": 3.2859,
"step": 10
},
{
"epoch": 0.10714285714285714,
"grad_norm": 8.1875,
"learning_rate": 5.5433388474866766e-05,
"loss": 2.8128,
"step": 15
},
{
"epoch": 0.14285714285714285,
"grad_norm": 6.3125,
"learning_rate": 7.52310272158906e-05,
"loss": 2.6378,
"step": 20
},
{
"epoch": 0.17857142857142858,
"grad_norm": 5.3125,
"learning_rate": 9.502866595691445e-05,
"loss": 2.4924,
"step": 25
},
{
"epoch": 0.21428571428571427,
"grad_norm": 5.84375,
"learning_rate": 0.0001148263046979383,
"loss": 2.4994,
"step": 30
},
{
"epoch": 0.25,
"grad_norm": 5.125,
"learning_rate": 0.00013462394343896213,
"loss": 2.4877,
"step": 35
},
{
"epoch": 0.2857142857142857,
"grad_norm": 5.3125,
"learning_rate": 0.00013855579078350677,
"loss": 2.5123,
"step": 40
},
{
"epoch": 0.32142857142857145,
"grad_norm": 4.03125,
"learning_rate": 0.0001384433896787117,
"loss": 2.453,
"step": 45
},
{
"epoch": 0.35714285714285715,
"grad_norm": 4.5625,
"learning_rate": 0.00013824472478409896,
"loss": 2.4429,
"step": 50
},
{
"epoch": 0.39285714285714285,
"grad_norm": 3.75,
"learning_rate": 0.00013796012675777958,
"loss": 2.4012,
"step": 55
},
{
"epoch": 0.42857142857142855,
"grad_norm": 3.234375,
"learning_rate": 0.00013759006928508162,
"loss": 2.351,
"step": 60
},
{
"epoch": 0.4642857142857143,
"grad_norm": 3.71875,
"learning_rate": 0.00013713516829014772,
"loss": 2.3272,
"step": 65
},
{
"epoch": 0.5,
"grad_norm": 3.875,
"learning_rate": 0.00013659618091079015,
"loss": 2.3015,
"step": 70
},
{
"epoch": 0.5357142857142857,
"grad_norm": 3.171875,
"learning_rate": 0.00013597400423830962,
"loss": 2.2773,
"step": 75
},
{
"epoch": 0.5714285714285714,
"grad_norm": 3.359375,
"learning_rate": 0.00013526967382437543,
"loss": 2.2415,
"step": 80
},
{
"epoch": 0.6071428571428571,
"grad_norm": 2.9375,
"learning_rate": 0.00013448436195745173,
"loss": 2.1806,
"step": 85
},
{
"epoch": 0.6428571428571429,
"grad_norm": 2.8125,
"learning_rate": 0.00013361937571163926,
"loss": 2.1731,
"step": 90
},
{
"epoch": 0.6785714285714286,
"grad_norm": 3.015625,
"learning_rate": 0.00013267615477117944,
"loss": 2.2264,
"step": 95
},
{
"epoch": 0.7142857142857143,
"grad_norm": 2.75,
"learning_rate": 0.0001316562690342419,
"loss": 2.1964,
"step": 100
},
{
"epoch": 0.75,
"grad_norm": 2.71875,
"learning_rate": 0.00013056141599998415,
"loss": 2.2052,
"step": 105
},
{
"epoch": 0.7857142857142857,
"grad_norm": 3.21875,
"learning_rate": 0.00012939341794323162,
"loss": 2.1698,
"step": 110
},
{
"epoch": 0.8214285714285714,
"grad_norm": 2.734375,
"learning_rate": 0.00012815421888148093,
"loss": 2.1723,
"step": 115
},
{
"epoch": 0.8571428571428571,
"grad_norm": 2.515625,
"learning_rate": 0.0001268458813392749,
"loss": 2.1478,
"step": 120
},
{
"epoch": 0.8928571428571429,
"grad_norm": 2.546875,
"learning_rate": 0.00012547058291533382,
"loss": 2.0849,
"step": 125
},
{
"epoch": 0.9285714285714286,
"grad_norm": 2.640625,
"learning_rate": 0.00012403061265815721,
"loss": 2.1297,
"step": 130
},
{
"epoch": 0.9642857142857143,
"grad_norm": 2.5,
"learning_rate": 0.00012252836725612872,
"loss": 2.0767,
"step": 135
},
{
"epoch": 1.0,
"grad_norm": 2.34375,
"learning_rate": 0.00012096634704846468,
"loss": 2.1226,
"step": 140
},
{
"epoch": 1.0,
"eval_loss": 2.0443668365478516,
"eval_runtime": 0.4691,
"eval_samples_per_second": 10.658,
"eval_steps_per_second": 10.658,
"step": 140
},
{
"epoch": 1.0357142857142858,
"grad_norm": 2.5625,
"learning_rate": 0.00011934715186364626,
"loss": 1.7431,
"step": 145
},
{
"epoch": 1.0714285714285714,
"grad_norm": 2.390625,
"learning_rate": 0.00011767347669226159,
"loss": 1.6931,
"step": 150
},
{
"epoch": 1.1071428571428572,
"grad_norm": 2.40625,
"learning_rate": 0.00011594810720145982,
"loss": 1.6984,
"step": 155
},
{
"epoch": 1.1428571428571428,
"grad_norm": 2.46875,
"learning_rate": 0.00011417391509848311,
"loss": 1.7018,
"step": 160
},
{
"epoch": 1.1785714285714286,
"grad_norm": 2.140625,
"learning_rate": 0.00011235385335099333,
"loss": 1.7014,
"step": 165
},
{
"epoch": 1.2142857142857142,
"grad_norm": 2.65625,
"learning_rate": 0.00011049095127214879,
"loss": 1.7539,
"step": 170
},
{
"epoch": 1.25,
"grad_norm": 2.65625,
"learning_rate": 0.00010858830947861153,
"loss": 1.7273,
"step": 175
},
{
"epoch": 1.2857142857142856,
"grad_norm": 2.375,
"learning_rate": 0.00010664909472987668,
"loss": 1.7267,
"step": 180
},
{
"epoch": 1.3214285714285714,
"grad_norm": 2.484375,
"learning_rate": 0.00010467653465751409,
"loss": 1.7098,
"step": 185
},
{
"epoch": 1.3571428571428572,
"grad_norm": 2.359375,
"learning_rate": 0.00010267391239309385,
"loss": 1.7123,
"step": 190
},
{
"epoch": 1.3928571428571428,
"grad_norm": 2.25,
"learning_rate": 0.00010064456110373801,
"loss": 1.7539,
"step": 195
},
{
"epoch": 1.4285714285714286,
"grad_norm": 2.34375,
"learning_rate": 9.859185844439268e-05,
"loss": 1.7102,
"step": 200
},
{
"epoch": 1.4642857142857144,
"grad_norm": 2.203125,
"learning_rate": 9.651922093605485e-05,
"loss": 1.6981,
"step": 205
},
{
"epoch": 1.5,
"grad_norm": 2.171875,
"learning_rate": 9.443009827931028e-05,
"loss": 1.7112,
"step": 210
},
{
"epoch": 1.5357142857142856,
"grad_norm": 2.234375,
"learning_rate": 9.232796761264735e-05,
"loss": 1.7439,
"step": 215
},
{
"epoch": 1.5714285714285714,
"grad_norm": 2.21875,
"learning_rate": 9.021632772510318e-05,
"loss": 1.7149,
"step": 220
},
{
"epoch": 1.6071428571428572,
"grad_norm": 2.109375,
"learning_rate": 8.809869323287461e-05,
"loss": 1.6947,
"step": 225
},
{
"epoch": 1.6428571428571428,
"grad_norm": 2.296875,
"learning_rate": 8.597858872958643e-05,
"loss": 1.6722,
"step": 230
},
{
"epoch": 1.6785714285714286,
"grad_norm": 2.140625,
"learning_rate": 8.385954291995294e-05,
"loss": 1.6419,
"step": 235
},
{
"epoch": 1.7142857142857144,
"grad_norm": 2.0,
"learning_rate": 8.174508274659743e-05,
"loss": 1.6508,
"step": 240
},
{
"epoch": 1.75,
"grad_norm": 2.03125,
"learning_rate": 7.963872751980418e-05,
"loss": 1.6213,
"step": 245
},
{
"epoch": 1.7857142857142856,
"grad_norm": 2.109375,
"learning_rate": 7.754398305997372e-05,
"loss": 1.6257,
"step": 250
},
{
"epoch": 1.8214285714285714,
"grad_norm": 2.109375,
"learning_rate": 7.546433586253086e-05,
"loss": 1.5915,
"step": 255
},
{
"epoch": 1.8571428571428572,
"grad_norm": 1.890625,
"learning_rate": 7.340324729499708e-05,
"loss": 1.6334,
"step": 260
},
{
"epoch": 1.8928571428571428,
"grad_norm": 1.96875,
"learning_rate": 7.136414783588594e-05,
"loss": 1.6345,
"step": 265
},
{
"epoch": 1.9285714285714286,
"grad_norm": 2.015625,
"learning_rate": 6.935043136501011e-05,
"loss": 1.6109,
"step": 270
},
{
"epoch": 1.9642857142857144,
"grad_norm": 2.09375,
"learning_rate": 6.736544951470336e-05,
"loss": 1.6274,
"step": 275
},
{
"epoch": 2.0,
"grad_norm": 2.109375,
"learning_rate": 6.541250609135935e-05,
"loss": 1.6177,
"step": 280
},
{
"epoch": 2.0,
"eval_loss": 1.8396857976913452,
"eval_runtime": 0.3556,
"eval_samples_per_second": 14.061,
"eval_steps_per_second": 14.061,
"step": 280
}
],
"logging_steps": 5,
"max_steps": 420,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.6551816128692224e+17,
"train_batch_size": 48,
"trial_name": null,
"trial_params": null
}