{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 13.333333333333334,
"eval_steps": 500,
"global_step": 700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01904761904761905,
"grad_norm": 3.15625,
"learning_rate": 2.564102564102564e-06,
"loss": 3.3205,
"step": 1
},
{
"epoch": 0.09523809523809523,
"grad_norm": 4.34375,
"learning_rate": 1.282051282051282e-05,
"loss": 3.3126,
"step": 5
},
{
"epoch": 0.19047619047619047,
"grad_norm": 2.609375,
"learning_rate": 2.564102564102564e-05,
"loss": 3.2719,
"step": 10
},
{
"epoch": 0.2857142857142857,
"grad_norm": 2.828125,
"learning_rate": 3.846153846153846e-05,
"loss": 3.1205,
"step": 15
},
{
"epoch": 0.38095238095238093,
"grad_norm": 2.03125,
"learning_rate": 5.128205128205128e-05,
"loss": 2.8114,
"step": 20
},
{
"epoch": 0.47619047619047616,
"grad_norm": 2.125,
"learning_rate": 6.410256410256412e-05,
"loss": 2.5384,
"step": 25
},
{
"epoch": 0.5714285714285714,
"grad_norm": 6.03125,
"learning_rate": 7.692307692307693e-05,
"loss": 2.3201,
"step": 30
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.0625,
"learning_rate": 8.974358974358975e-05,
"loss": 2.1053,
"step": 35
},
{
"epoch": 0.7619047619047619,
"grad_norm": 1.359375,
"learning_rate": 0.00010256410256410256,
"loss": 1.8717,
"step": 40
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.54296875,
"learning_rate": 0.00011538461538461538,
"loss": 1.6592,
"step": 45
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.427734375,
"learning_rate": 0.00012820512820512823,
"loss": 1.518,
"step": 50
},
{
"epoch": 0.9904761904761905,
"eval_loss": 2.7708818912506104,
"eval_runtime": 0.4987,
"eval_samples_per_second": 42.112,
"eval_steps_per_second": 2.005,
"step": 52
},
{
"epoch": 1.0476190476190477,
"grad_norm": 0.396484375,
"learning_rate": 0.00014102564102564104,
"loss": 1.426,
"step": 55
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.51953125,
"learning_rate": 0.00015384615384615385,
"loss": 1.3513,
"step": 60
},
{
"epoch": 1.2380952380952381,
"grad_norm": 0.75390625,
"learning_rate": 0.0001666666666666667,
"loss": 1.3036,
"step": 65
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.703125,
"learning_rate": 0.0001794871794871795,
"loss": 1.2627,
"step": 70
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.6015625,
"learning_rate": 0.00019230769230769233,
"loss": 1.2335,
"step": 75
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.31640625,
"learning_rate": 0.00019999599453798524,
"loss": 1.2094,
"step": 80
},
{
"epoch": 1.619047619047619,
"grad_norm": 0.359375,
"learning_rate": 0.0001999509367752813,
"loss": 1.1957,
"step": 85
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.80859375,
"learning_rate": 0.00019985583705641418,
"loss": 1.1772,
"step": 90
},
{
"epoch": 1.8095238095238095,
"grad_norm": 0.5,
"learning_rate": 0.0001997107429945041,
"loss": 1.1595,
"step": 95
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.48046875,
"learning_rate": 0.0001995157272330992,
"loss": 1.1451,
"step": 100
},
{
"epoch": 2.0,
"grad_norm": 0.408203125,
"learning_rate": 0.0001992708874098054,
"loss": 1.1423,
"step": 105
},
{
"epoch": 2.0,
"eval_loss": 2.659493923187256,
"eval_runtime": 0.4877,
"eval_samples_per_second": 43.055,
"eval_steps_per_second": 2.05,
"step": 105
},
{
"epoch": 2.0952380952380953,
"grad_norm": 0.419921875,
"learning_rate": 0.00019897634610740287,
"loss": 1.1131,
"step": 110
},
{
"epoch": 2.1904761904761907,
"grad_norm": 0.50390625,
"learning_rate": 0.00019863225079247285,
"loss": 1.1121,
"step": 115
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.333984375,
"learning_rate": 0.00019823877374156647,
"loss": 1.103,
"step": 120
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.5859375,
"learning_rate": 0.00019779611195495177,
"loss": 1.1023,
"step": 125
},
{
"epoch": 2.4761904761904763,
"grad_norm": 0.95703125,
"learning_rate": 0.00019730448705798239,
"loss": 1.086,
"step": 130
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.392578125,
"learning_rate": 0.00019676414519013781,
"loss": 1.0886,
"step": 135
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.70703125,
"learning_rate": 0.0001961753568817896,
"loss": 1.0818,
"step": 140
},
{
"epoch": 2.761904761904762,
"grad_norm": 0.49609375,
"learning_rate": 0.0001955384169187563,
"loss": 1.0799,
"step": 145
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.71484375,
"learning_rate": 0.00019485364419471454,
"loss": 1.0792,
"step": 150
},
{
"epoch": 2.9523809523809526,
"grad_norm": 0.267578125,
"learning_rate": 0.00019412138155154002,
"loss": 1.0681,
"step": 155
},
{
"epoch": 2.9904761904761905,
"eval_loss": 2.6406373977661133,
"eval_runtime": 0.4936,
"eval_samples_per_second": 42.54,
"eval_steps_per_second": 2.026,
"step": 157
},
{
"epoch": 3.0476190476190474,
"grad_norm": 0.291015625,
"learning_rate": 0.0001933419956076584,
"loss": 1.0607,
"step": 160
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.384765625,
"learning_rate": 0.00019251587657449236,
"loss": 1.0467,
"step": 165
},
{
"epoch": 3.238095238095238,
"grad_norm": 0.2578125,
"learning_rate": 0.00019164343806109632,
"loss": 1.0429,
"step": 170
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.28125,
"learning_rate": 0.00019072511686707663,
"loss": 1.0477,
"step": 175
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.23046875,
"learning_rate": 0.0001897613727639014,
"loss": 1.0407,
"step": 180
},
{
"epoch": 3.5238095238095237,
"grad_norm": 0.390625,
"learning_rate": 0.00018875268826470872,
"loss": 1.0408,
"step": 185
},
{
"epoch": 3.619047619047619,
"grad_norm": 0.3046875,
"learning_rate": 0.00018769956838272936,
"loss": 1.0392,
"step": 190
},
{
"epoch": 3.7142857142857144,
"grad_norm": 0.3203125,
"learning_rate": 0.00018660254037844388,
"loss": 1.0393,
"step": 195
},
{
"epoch": 3.8095238095238093,
"grad_norm": 0.32421875,
"learning_rate": 0.00018546215349560203,
"loss": 1.0385,
"step": 200
},
{
"epoch": 3.9047619047619047,
"grad_norm": 0.828125,
"learning_rate": 0.00018427897868623534,
"loss": 1.0383,
"step": 205
},
{
"epoch": 4.0,
"grad_norm": 0.4375,
"learning_rate": 0.00018305360832480117,
"loss": 1.0335,
"step": 210
},
{
"epoch": 4.0,
"eval_loss": 2.642651319503784,
"eval_runtime": 0.4862,
"eval_samples_per_second": 43.193,
"eval_steps_per_second": 2.057,
"step": 210
},
{
"epoch": 4.095238095238095,
"grad_norm": 0.291015625,
"learning_rate": 0.00018178665591160172,
"loss": 1.0188,
"step": 215
},
{
"epoch": 4.190476190476191,
"grad_norm": 0.27734375,
"learning_rate": 0.00018047875576562557,
"loss": 1.0134,
"step": 220
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.255859375,
"learning_rate": 0.0001791305627069662,
"loss": 1.0109,
"step": 225
},
{
"epoch": 4.380952380952381,
"grad_norm": 0.265625,
"learning_rate": 0.0001777427517289766,
"loss": 1.009,
"step": 230
},
{
"epoch": 4.476190476190476,
"grad_norm": 0.2890625,
"learning_rate": 0.00017631601766032336,
"loss": 1.0116,
"step": 235
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.5,
"learning_rate": 0.00017485107481711012,
"loss": 1.0067,
"step": 240
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.58203125,
"learning_rate": 0.0001733486566452446,
"loss": 1.0112,
"step": 245
},
{
"epoch": 4.761904761904762,
"grad_norm": 0.40234375,
"learning_rate": 0.0001718095153532274,
"loss": 1.0096,
"step": 250
},
{
"epoch": 4.857142857142857,
"grad_norm": 0.67578125,
"learning_rate": 0.00017023442153554777,
"loss": 1.0046,
"step": 255
},
{
"epoch": 4.9523809523809526,
"grad_norm": 0.76953125,
"learning_rate": 0.0001686241637868734,
"loss": 1.0079,
"step": 260
},
{
"epoch": 4.9904761904761905,
"eval_loss": 2.645949363708496,
"eval_runtime": 0.4992,
"eval_samples_per_second": 42.069,
"eval_steps_per_second": 2.003,
"step": 262
},
{
"epoch": 5.0476190476190474,
"grad_norm": 0.310546875,
"learning_rate": 0.00016697954830722868,
"loss": 0.9948,
"step": 265
},
{
"epoch": 5.142857142857143,
"grad_norm": 0.34375,
"learning_rate": 0.0001653013984983585,
"loss": 0.9868,
"step": 270
},
{
"epoch": 5.238095238095238,
"grad_norm": 0.5625,
"learning_rate": 0.0001635905545514795,
"loss": 0.9845,
"step": 275
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.5859375,
"learning_rate": 0.0001618478730266255,
"loss": 0.9812,
"step": 280
},
{
"epoch": 5.428571428571429,
"grad_norm": 0.65625,
"learning_rate": 0.0001600742264237979,
"loss": 0.9887,
"step": 285
},
{
"epoch": 5.523809523809524,
"grad_norm": 0.357421875,
"learning_rate": 0.00015827050274613513,
"loss": 0.9863,
"step": 290
},
{
"epoch": 5.619047619047619,
"grad_norm": 0.30078125,
"learning_rate": 0.0001564376050553205,
"loss": 0.9862,
"step": 295
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.275390625,
"learning_rate": 0.00015457645101945046,
"loss": 0.9865,
"step": 300
},
{
"epoch": 5.809523809523809,
"grad_norm": 0.337890625,
"learning_rate": 0.00015268797245359035,
"loss": 0.9929,
"step": 305
},
{
"epoch": 5.904761904761905,
"grad_norm": 0.294921875,
"learning_rate": 0.0001507731148532468,
"loss": 0.9868,
"step": 310
},
{
"epoch": 6.0,
"grad_norm": 0.32421875,
"learning_rate": 0.00014883283692099112,
"loss": 0.9837,
"step": 315
},
{
"epoch": 6.0,
"eval_loss": 2.657383441925049,
"eval_runtime": 0.4893,
"eval_samples_per_second": 42.915,
"eval_steps_per_second": 2.044,
"step": 315
},
{
"epoch": 6.095238095238095,
"grad_norm": 0.294921875,
"learning_rate": 0.00014686811008647038,
"loss": 0.9669,
"step": 320
},
{
"epoch": 6.190476190476191,
"grad_norm": 0.287109375,
"learning_rate": 0.00014487991802004623,
"loss": 0.9674,
"step": 325
},
{
"epoch": 6.285714285714286,
"grad_norm": 0.3046875,
"learning_rate": 0.00014286925614030542,
"loss": 0.9712,
"step": 330
},
{
"epoch": 6.380952380952381,
"grad_norm": 0.28515625,
"learning_rate": 0.00014083713111568842,
"loss": 0.9663,
"step": 335
},
{
"epoch": 6.476190476190476,
"grad_norm": 0.462890625,
"learning_rate": 0.0001387845603604855,
"loss": 0.9674,
"step": 340
},
{
"epoch": 6.571428571428571,
"grad_norm": 0.54296875,
"learning_rate": 0.00013671257152545277,
"loss": 0.9732,
"step": 345
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.490234375,
"learning_rate": 0.00013462220198330328,
"loss": 0.9743,
"step": 350
},
{
"epoch": 6.761904761904762,
"grad_norm": 0.296875,
"learning_rate": 0.0001325144983093305,
"loss": 0.9704,
"step": 355
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.326171875,
"learning_rate": 0.0001303905157574247,
"loss": 0.9617,
"step": 360
},
{
"epoch": 6.9523809523809526,
"grad_norm": 0.3984375,
"learning_rate": 0.0001282513177317437,
"loss": 0.966,
"step": 365
},
{
"epoch": 6.9904761904761905,
"eval_loss": 2.6700327396392822,
"eval_runtime": 0.5365,
"eval_samples_per_second": 39.146,
"eval_steps_per_second": 1.864,
"step": 367
},
{
"epoch": 7.0476190476190474,
"grad_norm": 0.294921875,
"learning_rate": 0.00012609797525430373,
"loss": 0.9592,
"step": 370
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.3203125,
"learning_rate": 0.0001239315664287558,
"loss": 0.9535,
"step": 375
},
{
"epoch": 7.238095238095238,
"grad_norm": 0.26953125,
"learning_rate": 0.00012175317590061674,
"loss": 0.9528,
"step": 380
},
{
"epoch": 7.333333333333333,
"grad_norm": 0.416015625,
"learning_rate": 0.00011956389431422507,
"loss": 0.9572,
"step": 385
},
{
"epoch": 7.428571428571429,
"grad_norm": 0.341796875,
"learning_rate": 0.00011736481776669306,
"loss": 0.9461,
"step": 390
},
{
"epoch": 7.523809523809524,
"grad_norm": 0.30078125,
"learning_rate": 0.00011515704725912926,
"loss": 0.9513,
"step": 395
},
{
"epoch": 7.619047619047619,
"grad_norm": 0.3125,
"learning_rate": 0.00011294168814540553,
"loss": 0.9566,
"step": 400
},
{
"epoch": 7.714285714285714,
"grad_norm": 0.609375,
"learning_rate": 0.00011071984957874479,
"loss": 0.9594,
"step": 405
},
{
"epoch": 7.809523809523809,
"grad_norm": 0.296875,
"learning_rate": 0.00010849264395640649,
"loss": 0.9554,
"step": 410
},
{
"epoch": 7.904761904761905,
"grad_norm": 0.33203125,
"learning_rate": 0.0001062611863627482,
"loss": 0.9479,
"step": 415
},
{
"epoch": 8.0,
"grad_norm": 0.353515625,
"learning_rate": 0.00010402659401094152,
"loss": 0.9474,
"step": 420
},
{
"epoch": 8.0,
"eval_loss": 2.6798880100250244,
"eval_runtime": 0.4848,
"eval_samples_per_second": 43.314,
"eval_steps_per_second": 2.063,
"step": 420
},
{
"epoch": 8.095238095238095,
"grad_norm": 0.404296875,
"learning_rate": 0.00010178998568362243,
"loss": 0.9432,
"step": 425
},
{
"epoch": 8.19047619047619,
"grad_norm": 0.26953125,
"learning_rate": 9.955248117275566e-05,
"loss": 0.9398,
"step": 430
},
{
"epoch": 8.285714285714286,
"grad_norm": 0.326171875,
"learning_rate": 9.73152007189939e-05,
"loss": 0.9348,
"step": 435
},
{
"epoch": 8.380952380952381,
"grad_norm": 0.30078125,
"learning_rate": 9.507926445081219e-05,
"loss": 0.9416,
"step": 440
},
{
"epoch": 8.476190476190476,
"grad_norm": 0.28515625,
"learning_rate": 9.284579182369867e-05,
"loss": 0.9352,
"step": 445
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.283203125,
"learning_rate": 9.061590105968208e-05,
"loss": 0.9427,
"step": 450
},
{
"epoch": 8.666666666666666,
"grad_norm": 0.283203125,
"learning_rate": 8.839070858747697e-05,
"loss": 0.9413,
"step": 455
},
{
"epoch": 8.761904761904763,
"grad_norm": 0.294921875,
"learning_rate": 8.617132848352671e-05,
"loss": 0.9409,
"step": 460
},
{
"epoch": 8.857142857142858,
"grad_norm": 0.2890625,
"learning_rate": 8.395887191422397e-05,
"loss": 0.942,
"step": 465
},
{
"epoch": 8.952380952380953,
"grad_norm": 0.298828125,
"learning_rate": 8.175444657958876e-05,
"loss": 0.9406,
"step": 470
},
{
"epoch": 8.99047619047619,
"eval_loss": 2.688331365585327,
"eval_runtime": 0.5482,
"eval_samples_per_second": 38.309,
"eval_steps_per_second": 1.824,
"step": 472
},
{
"epoch": 9.047619047619047,
"grad_norm": 0.302734375,
"learning_rate": 7.955915615868111e-05,
"loss": 0.9375,
"step": 475
},
{
"epoch": 9.142857142857142,
"grad_norm": 0.275390625,
"learning_rate": 7.73740997570278e-05,
"loss": 0.9322,
"step": 480
},
{
"epoch": 9.238095238095237,
"grad_norm": 0.26171875,
"learning_rate": 7.520037135633816e-05,
"loss": 0.9292,
"step": 485
},
{
"epoch": 9.333333333333334,
"grad_norm": 0.287109375,
"learning_rate": 7.303905926678564e-05,
"loss": 0.9269,
"step": 490
},
{
"epoch": 9.428571428571429,
"grad_norm": 0.341796875,
"learning_rate": 7.089124558212871e-05,
"loss": 0.9331,
"step": 495
},
{
"epoch": 9.523809523809524,
"grad_norm": 0.28125,
"learning_rate": 6.875800563794425e-05,
"loss": 0.9344,
"step": 500
},
{
"epoch": 9.619047619047619,
"grad_norm": 0.28515625,
"learning_rate": 6.664040747324437e-05,
"loss": 0.9294,
"step": 505
},
{
"epoch": 9.714285714285714,
"grad_norm": 0.267578125,
"learning_rate": 6.453951129574644e-05,
"loss": 0.9347,
"step": 510
},
{
"epoch": 9.80952380952381,
"grad_norm": 0.30078125,
"learning_rate": 6.245636895106402e-05,
"loss": 0.9282,
"step": 515
},
{
"epoch": 9.904761904761905,
"grad_norm": 0.353515625,
"learning_rate": 6.039202339608432e-05,
"loss": 0.9299,
"step": 520
},
{
"epoch": 10.0,
"grad_norm": 0.27734375,
"learning_rate": 5.834750817679606e-05,
"loss": 0.9245,
"step": 525
},
{
"epoch": 10.0,
"eval_loss": 2.6975326538085938,
"eval_runtime": 0.485,
"eval_samples_per_second": 43.303,
"eval_steps_per_second": 2.062,
"step": 525
},
{
"epoch": 10.095238095238095,
"grad_norm": 0.30078125,
"learning_rate": 5.6323846910828735e-05,
"loss": 0.9233,
"step": 530
},
{
"epoch": 10.19047619047619,
"grad_norm": 0.265625,
"learning_rate": 5.432205277496327e-05,
"loss": 0.9235,
"step": 535
},
{
"epoch": 10.285714285714286,
"grad_norm": 0.2734375,
"learning_rate": 5.234312799786921e-05,
"loss": 0.9194,
"step": 540
},
{
"epoch": 10.380952380952381,
"grad_norm": 0.29296875,
"learning_rate": 5.0388063358324134e-05,
"loss": 0.9235,
"step": 545
},
{
"epoch": 10.476190476190476,
"grad_norm": 0.279296875,
"learning_rate": 4.845783768916482e-05,
"loss": 0.9217,
"step": 550
},
{
"epoch": 10.571428571428571,
"grad_norm": 0.265625,
"learning_rate": 4.6553417387219886e-05,
"loss": 0.9243,
"step": 555
},
{
"epoch": 10.666666666666666,
"grad_norm": 0.306640625,
"learning_rate": 4.467575592946864e-05,
"loss": 0.9262,
"step": 560
},
{
"epoch": 10.761904761904763,
"grad_norm": 0.255859375,
"learning_rate": 4.282579339566802e-05,
"loss": 0.9206,
"step": 565
},
{
"epoch": 10.857142857142858,
"grad_norm": 0.2578125,
"learning_rate": 4.100445599768774e-05,
"loss": 0.9256,
"step": 570
},
{
"epoch": 10.952380952380953,
"grad_norm": 0.265625,
"learning_rate": 3.9212655615787804e-05,
"loss": 0.9208,
"step": 575
},
{
"epoch": 10.99047619047619,
"eval_loss": 2.707897663116455,
"eval_runtime": 0.647,
"eval_samples_per_second": 32.456,
"eval_steps_per_second": 1.546,
"step": 577
},
{
"epoch": 11.047619047619047,
"grad_norm": 0.25,
"learning_rate": 3.745128934207225e-05,
"loss": 0.9226,
"step": 580
},
{
"epoch": 11.142857142857142,
"grad_norm": 0.263671875,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.9121,
"step": 585
},
{
"epoch": 11.238095238095237,
"grad_norm": 0.259765625,
"learning_rate": 3.402337085960119e-05,
"loss": 0.9155,
"step": 590
},
{
"epoch": 11.333333333333334,
"grad_norm": 0.2578125,
"learning_rate": 3.235853489035241e-05,
"loss": 0.9194,
"step": 595
},
{
"epoch": 11.428571428571429,
"grad_norm": 0.26953125,
"learning_rate": 3.072756464904006e-05,
"loss": 0.9202,
"step": 600
},
{
"epoch": 11.523809523809524,
"grad_norm": 0.27734375,
"learning_rate": 2.9131276705713006e-05,
"loss": 0.9186,
"step": 605
},
{
"epoch": 11.619047619047619,
"grad_norm": 0.259765625,
"learning_rate": 2.7570470266200176e-05,
"loss": 0.9195,
"step": 610
},
{
"epoch": 11.714285714285714,
"grad_norm": 0.2470703125,
"learning_rate": 2.6045926771976303e-05,
"loss": 0.9223,
"step": 615
},
{
"epoch": 11.80952380952381,
"grad_norm": 0.25,
"learning_rate": 2.4558409508920986e-05,
"loss": 0.9139,
"step": 620
},
{
"epoch": 11.904761904761905,
"grad_norm": 0.248046875,
"learning_rate": 2.3108663225168435e-05,
"loss": 0.9117,
"step": 625
},
{
"epoch": 12.0,
"grad_norm": 0.263671875,
"learning_rate": 2.1697413758237784e-05,
"loss": 0.9195,
"step": 630
},
{
"epoch": 12.0,
"eval_loss": 2.714789867401123,
"eval_runtime": 0.4906,
"eval_samples_per_second": 42.808,
"eval_steps_per_second": 2.038,
"step": 630
},
{
"epoch": 12.095238095238095,
"grad_norm": 0.2392578125,
"learning_rate": 2.032536767163141e-05,
"loss": 0.9167,
"step": 635
},
{
"epoch": 12.19047619047619,
"grad_norm": 0.25,
"learning_rate": 1.8993211901083353e-05,
"loss": 0.9144,
"step": 640
},
{
"epoch": 12.285714285714286,
"grad_norm": 0.2578125,
"learning_rate": 1.7701613410634365e-05,
"loss": 0.915,
"step": 645
},
{
"epoch": 12.380952380952381,
"grad_norm": 0.248046875,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.9153,
"step": 650
},
{
"epoch": 12.476190476190476,
"grad_norm": 0.251953125,
"learning_rate": 1.5242654274342894e-05,
"loss": 0.9133,
"step": 655
},
{
"epoch": 12.571428571428571,
"grad_norm": 0.25,
"learning_rate": 1.4076524743778319e-05,
"loss": 0.9134,
"step": 660
},
{
"epoch": 12.666666666666666,
"grad_norm": 0.248046875,
"learning_rate": 1.295341410749208e-05,
"loss": 0.9126,
"step": 665
},
{
"epoch": 12.761904761904763,
"grad_norm": 0.25,
"learning_rate": 1.1873884667900125e-05,
"loss": 0.9094,
"step": 670
},
{
"epoch": 12.857142857142858,
"grad_norm": 0.236328125,
"learning_rate": 1.083847690782972e-05,
"loss": 0.9183,
"step": 675
},
{
"epoch": 12.952380952380953,
"grad_norm": 0.255859375,
"learning_rate": 9.8477092199184e-06,
"loss": 0.9212,
"step": 680
},
{
"epoch": 12.99047619047619,
"eval_loss": 2.7153878211975098,
"eval_runtime": 0.5024,
"eval_samples_per_second": 41.796,
"eval_steps_per_second": 1.99,
"step": 682
},
{
"epoch": 13.047619047619047,
"grad_norm": 0.2431640625,
"learning_rate": 8.902077647072881e-06,
"loss": 0.9139,
"step": 685
},
{
"epoch": 13.142857142857142,
"grad_norm": 0.2431640625,
"learning_rate": 8.002055634117578e-06,
"loss": 0.9123,
"step": 690
},
{
"epoch": 13.238095238095237,
"grad_norm": 0.2470703125,
"learning_rate": 7.148093790757371e-06,
"loss": 0.9152,
"step": 695
},
{
"epoch": 13.333333333333334,
"grad_norm": 0.2578125,
"learning_rate": 6.3406196659728465e-06,
"loss": 0.9087,
"step": 700
},
{
"epoch": 13.333333333333334,
"step": 700,
"total_flos": 1.1018283868225536e+18,
"train_loss": 0.0,
"train_runtime": 3.5395,
"train_samples_per_second": 37654.826,
"train_steps_per_second": 146.913
}
],
"logging_steps": 5,
"max_steps": 520,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1018283868225536e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}