vivit-MSASL-dataset / trainer_state.json
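A minimal sketch (not part of the original file) of how this log could be inspected, assuming it is saved locally as trainer_state.json; the keys it reads (log_history, eval_accuracy, eval_loss, best_metric, best_model_checkpoint) all appear in the JSON below.

# Sketch: load trainer_state.json and print the evaluation history.
# Assumes the JSON content below is saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the log entries that carry evaluation results.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]

for e in evals:
    print(f"epoch {e['epoch']:>6}  step {e['step']:>5}  "
          f"acc {e['eval_accuracy']:.4f}  loss {e['eval_loss']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])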
{
"best_metric": 0.9285714285714286,
"best_model_checkpoint": "vivit-b-16x2-1-finetuned-CSL-dataset/checkpoint-8755",
"epoch": 84.01,
"eval_steps": 500,
"global_step": 8755,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000970873786407767,
"grad_norm": 22.467844009399414,
"learning_rate": 4.854368932038835e-07,
"loss": 2.3447,
"step": 10
},
{
"epoch": 0.001941747572815534,
"grad_norm": 20.65631675720215,
"learning_rate": 9.70873786407767e-07,
"loss": 2.3482,
"step": 20
},
{
"epoch": 0.002912621359223301,
"grad_norm": 20.56620216369629,
"learning_rate": 1.4563106796116506e-06,
"loss": 2.3327,
"step": 30
},
{
"epoch": 0.003883495145631068,
"grad_norm": 20.642162322998047,
"learning_rate": 1.941747572815534e-06,
"loss": 2.3973,
"step": 40
},
{
"epoch": 0.0048543689320388345,
"grad_norm": 20.541858673095703,
"learning_rate": 2.4271844660194174e-06,
"loss": 2.252,
"step": 50
},
{
"epoch": 0.005825242718446602,
"grad_norm": 23.094024658203125,
"learning_rate": 2.912621359223301e-06,
"loss": 2.6397,
"step": 60
},
{
"epoch": 0.006796116504854369,
"grad_norm": 20.131628036499023,
"learning_rate": 3.3980582524271844e-06,
"loss": 2.2672,
"step": 70
},
{
"epoch": 0.007766990291262136,
"grad_norm": 23.084117889404297,
"learning_rate": 3.883495145631068e-06,
"loss": 2.4271,
"step": 80
},
{
"epoch": 0.008737864077669903,
"grad_norm": 21.572179794311523,
"learning_rate": 4.368932038834952e-06,
"loss": 2.3928,
"step": 90
},
{
"epoch": 0.009708737864077669,
"grad_norm": 20.966886520385742,
"learning_rate": 4.854368932038835e-06,
"loss": 2.279,
"step": 100
},
{
"epoch": 0.01,
"eval_accuracy": 0.17857142857142858,
"eval_loss": 2.246880054473877,
"eval_runtime": 21.6963,
"eval_samples_per_second": 1.291,
"eval_steps_per_second": 0.645,
"step": 103
},
{
"epoch": 1.0006796116504855,
"grad_norm": 20.04715347290039,
"learning_rate": 5.3398058252427185e-06,
"loss": 2.1439,
"step": 110
},
{
"epoch": 1.0016504854368933,
"grad_norm": 22.203516006469727,
"learning_rate": 5.825242718446602e-06,
"loss": 2.3037,
"step": 120
},
{
"epoch": 1.0026213592233009,
"grad_norm": 20.944007873535156,
"learning_rate": 6.310679611650486e-06,
"loss": 2.4872,
"step": 130
},
{
"epoch": 1.0035922330097087,
"grad_norm": 24.46585464477539,
"learning_rate": 6.796116504854369e-06,
"loss": 2.156,
"step": 140
},
{
"epoch": 1.0045631067961165,
"grad_norm": 19.93899917602539,
"learning_rate": 7.281553398058253e-06,
"loss": 2.1715,
"step": 150
},
{
"epoch": 1.0055339805825243,
"grad_norm": 21.357013702392578,
"learning_rate": 7.766990291262136e-06,
"loss": 2.1723,
"step": 160
},
{
"epoch": 1.0065048543689321,
"grad_norm": 19.363840103149414,
"learning_rate": 8.25242718446602e-06,
"loss": 2.1195,
"step": 170
},
{
"epoch": 1.0074757281553397,
"grad_norm": 22.652414321899414,
"learning_rate": 8.737864077669904e-06,
"loss": 2.2942,
"step": 180
},
{
"epoch": 1.0084466019417475,
"grad_norm": 21.458127975463867,
"learning_rate": 9.223300970873788e-06,
"loss": 2.1016,
"step": 190
},
{
"epoch": 1.0094174757281553,
"grad_norm": 20.534849166870117,
"learning_rate": 9.70873786407767e-06,
"loss": 2.0281,
"step": 200
},
{
"epoch": 1.01,
"eval_accuracy": 0.35714285714285715,
"eval_loss": 2.0716233253479004,
"eval_runtime": 23.0344,
"eval_samples_per_second": 1.216,
"eval_steps_per_second": 0.608,
"step": 206
},
{
"epoch": 2.000388349514563,
"grad_norm": 17.832265853881836,
"learning_rate": 1.0194174757281553e-05,
"loss": 2.0753,
"step": 210
},
{
"epoch": 2.001359223300971,
"grad_norm": 25.544200897216797,
"learning_rate": 1.0679611650485437e-05,
"loss": 1.9507,
"step": 220
},
{
"epoch": 2.0023300970873787,
"grad_norm": 21.588672637939453,
"learning_rate": 1.116504854368932e-05,
"loss": 1.8433,
"step": 230
},
{
"epoch": 2.0033009708737866,
"grad_norm": 20.822473526000977,
"learning_rate": 1.1650485436893204e-05,
"loss": 1.8264,
"step": 240
},
{
"epoch": 2.0042718446601944,
"grad_norm": 20.417675018310547,
"learning_rate": 1.2135922330097088e-05,
"loss": 1.5772,
"step": 250
},
{
"epoch": 2.0052427184466017,
"grad_norm": 21.39651870727539,
"learning_rate": 1.2621359223300972e-05,
"loss": 2.0638,
"step": 260
},
{
"epoch": 2.0062135922330095,
"grad_norm": 23.192445755004883,
"learning_rate": 1.3106796116504854e-05,
"loss": 1.6459,
"step": 270
},
{
"epoch": 2.0071844660194174,
"grad_norm": 20.562423706054688,
"learning_rate": 1.3592233009708738e-05,
"loss": 1.7667,
"step": 280
},
{
"epoch": 2.008155339805825,
"grad_norm": 21.67121124267578,
"learning_rate": 1.4077669902912621e-05,
"loss": 1.7898,
"step": 290
},
{
"epoch": 2.009126213592233,
"grad_norm": 22.14192008972168,
"learning_rate": 1.4563106796116505e-05,
"loss": 1.7549,
"step": 300
},
{
"epoch": 2.01,
"eval_accuracy": 0.35714285714285715,
"eval_loss": 1.7472134828567505,
"eval_runtime": 22.0721,
"eval_samples_per_second": 1.269,
"eval_steps_per_second": 0.634,
"step": 309
},
{
"epoch": 3.0000970873786406,
"grad_norm": 20.45794105529785,
"learning_rate": 1.5048543689320387e-05,
"loss": 1.688,
"step": 310
},
{
"epoch": 3.0010679611650484,
"grad_norm": 12.089913368225098,
"learning_rate": 1.5533980582524273e-05,
"loss": 1.109,
"step": 320
},
{
"epoch": 3.002038834951456,
"grad_norm": 17.196495056152344,
"learning_rate": 1.6019417475728158e-05,
"loss": 1.3952,
"step": 330
},
{
"epoch": 3.003009708737864,
"grad_norm": 28.026491165161133,
"learning_rate": 1.650485436893204e-05,
"loss": 1.3662,
"step": 340
},
{
"epoch": 3.003980582524272,
"grad_norm": 16.75193214416504,
"learning_rate": 1.6990291262135926e-05,
"loss": 1.359,
"step": 350
},
{
"epoch": 3.0049514563106796,
"grad_norm": 20.51288414001465,
"learning_rate": 1.7475728155339808e-05,
"loss": 1.3948,
"step": 360
},
{
"epoch": 3.0059223300970874,
"grad_norm": 22.2510986328125,
"learning_rate": 1.796116504854369e-05,
"loss": 1.2545,
"step": 370
},
{
"epoch": 3.0068932038834952,
"grad_norm": 21.27890968322754,
"learning_rate": 1.8446601941747575e-05,
"loss": 1.187,
"step": 380
},
{
"epoch": 3.007864077669903,
"grad_norm": 22.77427864074707,
"learning_rate": 1.8932038834951457e-05,
"loss": 1.4674,
"step": 390
},
{
"epoch": 3.008834951456311,
"grad_norm": 10.05604076385498,
"learning_rate": 1.941747572815534e-05,
"loss": 1.0241,
"step": 400
},
{
"epoch": 3.0098058252427187,
"grad_norm": 19.90476417541504,
"learning_rate": 1.9902912621359225e-05,
"loss": 1.3771,
"step": 410
},
{
"epoch": 3.01,
"eval_accuracy": 0.4642857142857143,
"eval_loss": 1.403342604637146,
"eval_runtime": 22.8981,
"eval_samples_per_second": 1.223,
"eval_steps_per_second": 0.611,
"step": 412
},
{
"epoch": 4.000776699029126,
"grad_norm": 12.447587013244629,
"learning_rate": 2.0388349514563107e-05,
"loss": 0.764,
"step": 420
},
{
"epoch": 4.001747572815534,
"grad_norm": 17.31973648071289,
"learning_rate": 2.0873786407766992e-05,
"loss": 0.8629,
"step": 430
},
{
"epoch": 4.002718446601942,
"grad_norm": 11.099453926086426,
"learning_rate": 2.1359223300970874e-05,
"loss": 0.8132,
"step": 440
},
{
"epoch": 4.003689320388349,
"grad_norm": 12.586014747619629,
"learning_rate": 2.1844660194174756e-05,
"loss": 0.5925,
"step": 450
},
{
"epoch": 4.0046601941747575,
"grad_norm": 9.193548202514648,
"learning_rate": 2.233009708737864e-05,
"loss": 0.7083,
"step": 460
},
{
"epoch": 4.005631067961165,
"grad_norm": 14.5839204788208,
"learning_rate": 2.2815533980582527e-05,
"loss": 0.9439,
"step": 470
},
{
"epoch": 4.006601941747573,
"grad_norm": 20.9216251373291,
"learning_rate": 2.330097087378641e-05,
"loss": 0.9033,
"step": 480
},
{
"epoch": 4.0075728155339805,
"grad_norm": 17.041528701782227,
"learning_rate": 2.3786407766990294e-05,
"loss": 0.6828,
"step": 490
},
{
"epoch": 4.008543689320389,
"grad_norm": 15.716273307800293,
"learning_rate": 2.4271844660194176e-05,
"loss": 0.8407,
"step": 500
},
{
"epoch": 4.009514563106796,
"grad_norm": 8.393412590026855,
"learning_rate": 2.475728155339806e-05,
"loss": 0.7138,
"step": 510
},
{
"epoch": 4.01,
"eval_accuracy": 0.75,
"eval_loss": 0.9906893968582153,
"eval_runtime": 24.635,
"eval_samples_per_second": 1.137,
"eval_steps_per_second": 0.568,
"step": 515
},
{
"epoch": 5.000485436893204,
"grad_norm": 11.11153507232666,
"learning_rate": 2.5242718446601944e-05,
"loss": 0.7188,
"step": 520
},
{
"epoch": 5.001456310679612,
"grad_norm": 9.15324592590332,
"learning_rate": 2.5728155339805826e-05,
"loss": 0.6506,
"step": 530
},
{
"epoch": 5.002427184466019,
"grad_norm": 29.149995803833008,
"learning_rate": 2.6213592233009708e-05,
"loss": 0.6094,
"step": 540
},
{
"epoch": 5.003398058252428,
"grad_norm": 2.26300311088562,
"learning_rate": 2.6699029126213593e-05,
"loss": 0.6027,
"step": 550
},
{
"epoch": 5.004368932038835,
"grad_norm": 19.024925231933594,
"learning_rate": 2.7184466019417475e-05,
"loss": 0.5463,
"step": 560
},
{
"epoch": 5.005339805825242,
"grad_norm": 22.636201858520508,
"learning_rate": 2.766990291262136e-05,
"loss": 0.4183,
"step": 570
},
{
"epoch": 5.0063106796116505,
"grad_norm": 20.816205978393555,
"learning_rate": 2.8155339805825243e-05,
"loss": 0.6446,
"step": 580
},
{
"epoch": 5.007281553398058,
"grad_norm": 29.111713409423828,
"learning_rate": 2.8640776699029125e-05,
"loss": 0.3961,
"step": 590
},
{
"epoch": 5.008252427184466,
"grad_norm": 19.05779457092285,
"learning_rate": 2.912621359223301e-05,
"loss": 0.8104,
"step": 600
},
{
"epoch": 5.0092233009708735,
"grad_norm": 6.909221649169922,
"learning_rate": 2.9611650485436892e-05,
"loss": 0.5151,
"step": 610
},
{
"epoch": 5.01,
"eval_accuracy": 0.6785714285714286,
"eval_loss": 0.902064859867096,
"eval_runtime": 23.7547,
"eval_samples_per_second": 1.179,
"eval_steps_per_second": 0.589,
"step": 618
},
{
"epoch": 6.000194174757281,
"grad_norm": 3.888197660446167,
"learning_rate": 3.0097087378640774e-05,
"loss": 0.7196,
"step": 620
},
{
"epoch": 6.001165048543689,
"grad_norm": 17.437088012695312,
"learning_rate": 3.058252427184466e-05,
"loss": 0.4851,
"step": 630
},
{
"epoch": 6.002135922330097,
"grad_norm": 26.189762115478516,
"learning_rate": 3.1067961165048545e-05,
"loss": 0.5041,
"step": 640
},
{
"epoch": 6.003106796116505,
"grad_norm": 3.272768497467041,
"learning_rate": 3.155339805825243e-05,
"loss": 0.4218,
"step": 650
},
{
"epoch": 6.004077669902912,
"grad_norm": 9.38428783416748,
"learning_rate": 3.2038834951456316e-05,
"loss": 0.3978,
"step": 660
},
{
"epoch": 6.005048543689321,
"grad_norm": 1.1252257823944092,
"learning_rate": 3.2524271844660195e-05,
"loss": 0.3776,
"step": 670
},
{
"epoch": 6.006019417475728,
"grad_norm": 10.434605598449707,
"learning_rate": 3.300970873786408e-05,
"loss": 0.5352,
"step": 680
},
{
"epoch": 6.006990291262136,
"grad_norm": 23.116561889648438,
"learning_rate": 3.3495145631067966e-05,
"loss": 0.3432,
"step": 690
},
{
"epoch": 6.007961165048544,
"grad_norm": 0.4382549524307251,
"learning_rate": 3.398058252427185e-05,
"loss": 0.1816,
"step": 700
},
{
"epoch": 6.008932038834952,
"grad_norm": 0.36748507618904114,
"learning_rate": 3.446601941747573e-05,
"loss": 0.2608,
"step": 710
},
{
"epoch": 6.009902912621359,
"grad_norm": 2.5113391876220703,
"learning_rate": 3.4951456310679615e-05,
"loss": 0.5006,
"step": 720
},
{
"epoch": 6.01,
"eval_accuracy": 0.6785714285714286,
"eval_loss": 0.7181605696678162,
"eval_runtime": 24.3307,
"eval_samples_per_second": 1.151,
"eval_steps_per_second": 0.575,
"step": 721
},
{
"epoch": 7.000873786407767,
"grad_norm": 10.877245903015137,
"learning_rate": 3.54368932038835e-05,
"loss": 0.4433,
"step": 730
},
{
"epoch": 7.001844660194175,
"grad_norm": 1.7350434064865112,
"learning_rate": 3.592233009708738e-05,
"loss": 0.0579,
"step": 740
},
{
"epoch": 7.002815533980582,
"grad_norm": 0.6261988878250122,
"learning_rate": 3.6407766990291265e-05,
"loss": 0.2931,
"step": 750
},
{
"epoch": 7.003786407766991,
"grad_norm": 0.4281288981437683,
"learning_rate": 3.689320388349515e-05,
"loss": 0.2701,
"step": 760
},
{
"epoch": 7.004757281553398,
"grad_norm": 36.368003845214844,
"learning_rate": 3.737864077669903e-05,
"loss": 0.3428,
"step": 770
},
{
"epoch": 7.005728155339805,
"grad_norm": 3.990633487701416,
"learning_rate": 3.7864077669902914e-05,
"loss": 0.4855,
"step": 780
},
{
"epoch": 7.006699029126214,
"grad_norm": 2.520648717880249,
"learning_rate": 3.83495145631068e-05,
"loss": 0.4303,
"step": 790
},
{
"epoch": 7.007669902912621,
"grad_norm": 17.141321182250977,
"learning_rate": 3.883495145631068e-05,
"loss": 0.2198,
"step": 800
},
{
"epoch": 7.008640776699029,
"grad_norm": 0.15869013965129852,
"learning_rate": 3.9320388349514564e-05,
"loss": 0.2738,
"step": 810
},
{
"epoch": 7.009611650485437,
"grad_norm": 29.869491577148438,
"learning_rate": 3.980582524271845e-05,
"loss": 0.2405,
"step": 820
},
{
"epoch": 7.01,
"eval_accuracy": 0.75,
"eval_loss": 0.6587792038917542,
"eval_runtime": 22.1804,
"eval_samples_per_second": 1.262,
"eval_steps_per_second": 0.631,
"step": 824
},
{
"epoch": 8.000582524271845,
"grad_norm": 17.194149017333984,
"learning_rate": 4.029126213592233e-05,
"loss": 0.5319,
"step": 830
},
{
"epoch": 8.001553398058252,
"grad_norm": 4.293007850646973,
"learning_rate": 4.077669902912621e-05,
"loss": 0.1672,
"step": 840
},
{
"epoch": 8.00252427184466,
"grad_norm": 0.18113088607788086,
"learning_rate": 4.12621359223301e-05,
"loss": 0.1068,
"step": 850
},
{
"epoch": 8.003495145631067,
"grad_norm": 44.19035720825195,
"learning_rate": 4.1747572815533984e-05,
"loss": 0.2801,
"step": 860
},
{
"epoch": 8.004466019417476,
"grad_norm": 65.8356704711914,
"learning_rate": 4.223300970873786e-05,
"loss": 0.1317,
"step": 870
},
{
"epoch": 8.005436893203884,
"grad_norm": 0.2686673402786255,
"learning_rate": 4.271844660194175e-05,
"loss": 0.7435,
"step": 880
},
{
"epoch": 8.006407766990291,
"grad_norm": 1.2562230825424194,
"learning_rate": 4.3203883495145634e-05,
"loss": 0.3022,
"step": 890
},
{
"epoch": 8.007378640776698,
"grad_norm": 14.710217475891113,
"learning_rate": 4.368932038834951e-05,
"loss": 0.0811,
"step": 900
},
{
"epoch": 8.008349514563108,
"grad_norm": 32.04058837890625,
"learning_rate": 4.4174757281553404e-05,
"loss": 0.3929,
"step": 910
},
{
"epoch": 8.009320388349515,
"grad_norm": 0.11743175983428955,
"learning_rate": 4.466019417475728e-05,
"loss": 0.1597,
"step": 920
},
{
"epoch": 8.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.6845706105232239,
"eval_runtime": 23.6317,
"eval_samples_per_second": 1.185,
"eval_steps_per_second": 0.592,
"step": 927
},
{
"epoch": 9.000291262135923,
"grad_norm": 0.2228081375360489,
"learning_rate": 4.514563106796117e-05,
"loss": 0.3838,
"step": 930
},
{
"epoch": 9.00126213592233,
"grad_norm": 0.687592625617981,
"learning_rate": 4.5631067961165054e-05,
"loss": 0.0609,
"step": 940
},
{
"epoch": 9.002233009708737,
"grad_norm": 1.2621179819107056,
"learning_rate": 4.611650485436894e-05,
"loss": 0.0247,
"step": 950
},
{
"epoch": 9.003203883495146,
"grad_norm": 0.07675457000732422,
"learning_rate": 4.660194174757282e-05,
"loss": 0.1207,
"step": 960
},
{
"epoch": 9.004174757281554,
"grad_norm": 9.658039093017578,
"learning_rate": 4.7087378640776703e-05,
"loss": 0.2541,
"step": 970
},
{
"epoch": 9.005145631067961,
"grad_norm": 18.839038848876953,
"learning_rate": 4.757281553398059e-05,
"loss": 0.134,
"step": 980
},
{
"epoch": 9.006116504854369,
"grad_norm": 50.27986526489258,
"learning_rate": 4.805825242718447e-05,
"loss": 0.5972,
"step": 990
},
{
"epoch": 9.007087378640776,
"grad_norm": 0.04356055706739426,
"learning_rate": 4.854368932038835e-05,
"loss": 0.3244,
"step": 1000
},
{
"epoch": 9.008058252427185,
"grad_norm": 7.938558101654053,
"learning_rate": 4.902912621359224e-05,
"loss": 0.3061,
"step": 1010
},
{
"epoch": 9.009029126213592,
"grad_norm": 0.2315950244665146,
"learning_rate": 4.951456310679612e-05,
"loss": 0.5862,
"step": 1020
},
{
"epoch": 9.01,
"grad_norm": 22.759157180786133,
"learning_rate": 5e-05,
"loss": 0.5375,
"step": 1030
},
{
"epoch": 9.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9473233222961426,
"eval_runtime": 22.9603,
"eval_samples_per_second": 1.219,
"eval_steps_per_second": 0.61,
"step": 1030
},
{
"epoch": 10.000970873786407,
"grad_norm": 5.556157112121582,
"learning_rate": 4.994606256742179e-05,
"loss": 0.0522,
"step": 1040
},
{
"epoch": 10.001941747572815,
"grad_norm": 1.0724961757659912,
"learning_rate": 4.9892125134843584e-05,
"loss": 0.1515,
"step": 1050
},
{
"epoch": 10.002912621359224,
"grad_norm": 3.1406235694885254,
"learning_rate": 4.983818770226538e-05,
"loss": 0.0441,
"step": 1060
},
{
"epoch": 10.003883495145631,
"grad_norm": 0.6284095048904419,
"learning_rate": 4.9784250269687166e-05,
"loss": 0.4542,
"step": 1070
},
{
"epoch": 10.004854368932039,
"grad_norm": 0.07982558012008667,
"learning_rate": 4.9730312837108953e-05,
"loss": 0.1927,
"step": 1080
},
{
"epoch": 10.005825242718446,
"grad_norm": 0.1747860461473465,
"learning_rate": 4.967637540453075e-05,
"loss": 0.5037,
"step": 1090
},
{
"epoch": 10.006796116504855,
"grad_norm": 65.43592071533203,
"learning_rate": 4.962243797195254e-05,
"loss": 0.3528,
"step": 1100
},
{
"epoch": 10.007766990291262,
"grad_norm": 0.08845037966966629,
"learning_rate": 4.956850053937433e-05,
"loss": 0.106,
"step": 1110
},
{
"epoch": 10.00873786407767,
"grad_norm": 0.1873186081647873,
"learning_rate": 4.951456310679612e-05,
"loss": 0.2798,
"step": 1120
},
{
"epoch": 10.009708737864077,
"grad_norm": 0.4424811601638794,
"learning_rate": 4.946062567421791e-05,
"loss": 0.211,
"step": 1130
},
{
"epoch": 10.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.5934346914291382,
"eval_runtime": 22.0268,
"eval_samples_per_second": 1.271,
"eval_steps_per_second": 0.636,
"step": 1133
},
{
"epoch": 11.000679611650485,
"grad_norm": 0.2165905088186264,
"learning_rate": 4.94066882416397e-05,
"loss": 0.2803,
"step": 1140
},
{
"epoch": 11.001650485436894,
"grad_norm": 0.07424941658973694,
"learning_rate": 4.935275080906149e-05,
"loss": 0.351,
"step": 1150
},
{
"epoch": 11.002621359223301,
"grad_norm": 0.09780796617269516,
"learning_rate": 4.929881337648328e-05,
"loss": 0.0482,
"step": 1160
},
{
"epoch": 11.003592233009709,
"grad_norm": 0.1673489362001419,
"learning_rate": 4.9244875943905075e-05,
"loss": 0.2821,
"step": 1170
},
{
"epoch": 11.004563106796116,
"grad_norm": 0.5309020280838013,
"learning_rate": 4.919093851132686e-05,
"loss": 0.0207,
"step": 1180
},
{
"epoch": 11.005533980582523,
"grad_norm": 0.4277690052986145,
"learning_rate": 4.913700107874865e-05,
"loss": 0.1813,
"step": 1190
},
{
"epoch": 11.006504854368933,
"grad_norm": 50.44525146484375,
"learning_rate": 4.9083063646170444e-05,
"loss": 0.1278,
"step": 1200
},
{
"epoch": 11.00747572815534,
"grad_norm": 36.960025787353516,
"learning_rate": 4.902912621359224e-05,
"loss": 0.706,
"step": 1210
},
{
"epoch": 11.008446601941747,
"grad_norm": 0.1074225977063179,
"learning_rate": 4.8975188781014026e-05,
"loss": 0.2245,
"step": 1220
},
{
"epoch": 11.009417475728155,
"grad_norm": 7.5070953369140625,
"learning_rate": 4.892125134843581e-05,
"loss": 0.1138,
"step": 1230
},
{
"epoch": 11.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.5115043520927429,
"eval_runtime": 22.3991,
"eval_samples_per_second": 1.25,
"eval_steps_per_second": 0.625,
"step": 1236
},
{
"epoch": 12.000388349514562,
"grad_norm": 0.015224846079945564,
"learning_rate": 4.886731391585761e-05,
"loss": 0.5137,
"step": 1240
},
{
"epoch": 12.001359223300971,
"grad_norm": 0.04116909205913544,
"learning_rate": 4.88133764832794e-05,
"loss": 0.2102,
"step": 1250
},
{
"epoch": 12.002330097087379,
"grad_norm": 0.717492938041687,
"learning_rate": 4.875943905070119e-05,
"loss": 0.2027,
"step": 1260
},
{
"epoch": 12.003300970873786,
"grad_norm": 0.05657333508133888,
"learning_rate": 4.870550161812298e-05,
"loss": 0.013,
"step": 1270
},
{
"epoch": 12.004271844660193,
"grad_norm": 0.13661307096481323,
"learning_rate": 4.865156418554477e-05,
"loss": 0.0022,
"step": 1280
},
{
"epoch": 12.005242718446603,
"grad_norm": 6.737663745880127,
"learning_rate": 4.8597626752966565e-05,
"loss": 0.0394,
"step": 1290
},
{
"epoch": 12.00621359223301,
"grad_norm": 0.03948163241147995,
"learning_rate": 4.854368932038835e-05,
"loss": 0.0555,
"step": 1300
},
{
"epoch": 12.007184466019417,
"grad_norm": 0.15491877496242523,
"learning_rate": 4.848975188781014e-05,
"loss": 0.0146,
"step": 1310
},
{
"epoch": 12.008155339805825,
"grad_norm": 0.01624881848692894,
"learning_rate": 4.8435814455231935e-05,
"loss": 0.1571,
"step": 1320
},
{
"epoch": 12.009126213592234,
"grad_norm": 0.07326442748308182,
"learning_rate": 4.838187702265373e-05,
"loss": 0.2601,
"step": 1330
},
{
"epoch": 12.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.6398950815200806,
"eval_runtime": 23.1317,
"eval_samples_per_second": 1.21,
"eval_steps_per_second": 0.605,
"step": 1339
},
{
"epoch": 13.000097087378641,
"grad_norm": 9.104988098144531,
"learning_rate": 4.8327939590075516e-05,
"loss": 0.1086,
"step": 1340
},
{
"epoch": 13.001067961165049,
"grad_norm": 2.3358242511749268,
"learning_rate": 4.8274002157497304e-05,
"loss": 0.2874,
"step": 1350
},
{
"epoch": 13.002038834951456,
"grad_norm": 0.0636238306760788,
"learning_rate": 4.82200647249191e-05,
"loss": 0.0941,
"step": 1360
},
{
"epoch": 13.003009708737864,
"grad_norm": 0.1735282987356186,
"learning_rate": 4.8166127292340886e-05,
"loss": 0.0091,
"step": 1370
},
{
"epoch": 13.003980582524273,
"grad_norm": 42.18791580200195,
"learning_rate": 4.811218985976268e-05,
"loss": 0.1626,
"step": 1380
},
{
"epoch": 13.00495145631068,
"grad_norm": 0.5081796646118164,
"learning_rate": 4.805825242718447e-05,
"loss": 0.1437,
"step": 1390
},
{
"epoch": 13.005922330097087,
"grad_norm": 0.21763081848621368,
"learning_rate": 4.800431499460626e-05,
"loss": 0.398,
"step": 1400
},
{
"epoch": 13.006893203883495,
"grad_norm": 0.023743387311697006,
"learning_rate": 4.795037756202805e-05,
"loss": 0.2665,
"step": 1410
},
{
"epoch": 13.007864077669902,
"grad_norm": 0.03076467476785183,
"learning_rate": 4.789644012944984e-05,
"loss": 0.3967,
"step": 1420
},
{
"epoch": 13.008834951456311,
"grad_norm": 0.02074982225894928,
"learning_rate": 4.784250269687163e-05,
"loss": 0.0464,
"step": 1430
},
{
"epoch": 13.009805825242719,
"grad_norm": 0.7776718735694885,
"learning_rate": 4.7788565264293425e-05,
"loss": 0.0137,
"step": 1440
},
{
"epoch": 13.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.791401207447052,
"eval_runtime": 23.1852,
"eval_samples_per_second": 1.208,
"eval_steps_per_second": 0.604,
"step": 1442
},
{
"epoch": 14.000776699029126,
"grad_norm": 0.018326900899410248,
"learning_rate": 4.773462783171521e-05,
"loss": 0.0724,
"step": 1450
},
{
"epoch": 14.001747572815534,
"grad_norm": 35.21026611328125,
"learning_rate": 4.7680690399137e-05,
"loss": 0.5157,
"step": 1460
},
{
"epoch": 14.002718446601941,
"grad_norm": 12.082097053527832,
"learning_rate": 4.7626752966558795e-05,
"loss": 0.4113,
"step": 1470
},
{
"epoch": 14.00368932038835,
"grad_norm": 0.03801337629556656,
"learning_rate": 4.757281553398059e-05,
"loss": 0.119,
"step": 1480
},
{
"epoch": 14.004660194174757,
"grad_norm": 36.30195236206055,
"learning_rate": 4.7518878101402376e-05,
"loss": 0.767,
"step": 1490
},
{
"epoch": 14.005631067961165,
"grad_norm": 46.10127639770508,
"learning_rate": 4.7464940668824164e-05,
"loss": 0.6013,
"step": 1500
},
{
"epoch": 14.006601941747572,
"grad_norm": 0.1389230340719223,
"learning_rate": 4.741100323624595e-05,
"loss": 0.1345,
"step": 1510
},
{
"epoch": 14.007572815533981,
"grad_norm": 0.04179445654153824,
"learning_rate": 4.735706580366775e-05,
"loss": 0.3598,
"step": 1520
},
{
"epoch": 14.008543689320389,
"grad_norm": 0.38324692845344543,
"learning_rate": 4.730312837108954e-05,
"loss": 0.0101,
"step": 1530
},
{
"epoch": 14.009514563106796,
"grad_norm": 68.41896057128906,
"learning_rate": 4.724919093851133e-05,
"loss": 0.7928,
"step": 1540
},
{
"epoch": 14.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.6824273467063904,
"eval_runtime": 23.6669,
"eval_samples_per_second": 1.183,
"eval_steps_per_second": 0.592,
"step": 1545
},
{
"epoch": 15.000485436893204,
"grad_norm": 0.011549362912774086,
"learning_rate": 4.7195253505933115e-05,
"loss": 0.005,
"step": 1550
},
{
"epoch": 15.001456310679611,
"grad_norm": 0.14075197279453278,
"learning_rate": 4.7141316073354916e-05,
"loss": 0.0108,
"step": 1560
},
{
"epoch": 15.00242718446602,
"grad_norm": 0.02513744682073593,
"learning_rate": 4.7087378640776703e-05,
"loss": 0.3968,
"step": 1570
},
{
"epoch": 15.003398058252428,
"grad_norm": 25.308015823364258,
"learning_rate": 4.703344120819849e-05,
"loss": 0.0201,
"step": 1580
},
{
"epoch": 15.004368932038835,
"grad_norm": 0.09266715496778488,
"learning_rate": 4.697950377562028e-05,
"loss": 0.0026,
"step": 1590
},
{
"epoch": 15.005339805825242,
"grad_norm": 0.005255271680653095,
"learning_rate": 4.692556634304207e-05,
"loss": 0.1314,
"step": 1600
},
{
"epoch": 15.00631067961165,
"grad_norm": 0.808135449886322,
"learning_rate": 4.687162891046387e-05,
"loss": 0.1985,
"step": 1610
},
{
"epoch": 15.007281553398059,
"grad_norm": 1.363420844078064,
"learning_rate": 4.6817691477885654e-05,
"loss": 0.2999,
"step": 1620
},
{
"epoch": 15.008252427184466,
"grad_norm": 0.02775932289659977,
"learning_rate": 4.676375404530744e-05,
"loss": 0.003,
"step": 1630
},
{
"epoch": 15.009223300970874,
"grad_norm": 0.03792349994182587,
"learning_rate": 4.6709816612729236e-05,
"loss": 0.006,
"step": 1640
},
{
"epoch": 15.01,
"eval_accuracy": 0.75,
"eval_loss": 1.3542832136154175,
"eval_runtime": 22.979,
"eval_samples_per_second": 1.219,
"eval_steps_per_second": 0.609,
"step": 1648
},
{
"epoch": 16.000194174757283,
"grad_norm": 0.17369619011878967,
"learning_rate": 4.665587918015103e-05,
"loss": 0.4932,
"step": 1650
},
{
"epoch": 16.00116504854369,
"grad_norm": 0.14237922430038452,
"learning_rate": 4.660194174757282e-05,
"loss": 0.0144,
"step": 1660
},
{
"epoch": 16.002135922330098,
"grad_norm": 0.210392564535141,
"learning_rate": 4.6548004314994605e-05,
"loss": 0.0016,
"step": 1670
},
{
"epoch": 16.003106796116505,
"grad_norm": 0.11072167754173279,
"learning_rate": 4.64940668824164e-05,
"loss": 0.5947,
"step": 1680
},
{
"epoch": 16.004077669902912,
"grad_norm": 0.003637285903096199,
"learning_rate": 4.644012944983819e-05,
"loss": 0.1469,
"step": 1690
},
{
"epoch": 16.00504854368932,
"grad_norm": 0.04335380345582962,
"learning_rate": 4.638619201725998e-05,
"loss": 0.0006,
"step": 1700
},
{
"epoch": 16.006019417475727,
"grad_norm": 0.025841353461146355,
"learning_rate": 4.633225458468177e-05,
"loss": 0.2231,
"step": 1710
},
{
"epoch": 16.006990291262134,
"grad_norm": 0.011378253810107708,
"learning_rate": 4.627831715210356e-05,
"loss": 0.5424,
"step": 1720
},
{
"epoch": 16.007961165048545,
"grad_norm": 0.7177201509475708,
"learning_rate": 4.622437971952535e-05,
"loss": 0.0066,
"step": 1730
},
{
"epoch": 16.008932038834953,
"grad_norm": 0.008238316513597965,
"learning_rate": 4.617044228694714e-05,
"loss": 0.0477,
"step": 1740
},
{
"epoch": 16.00990291262136,
"grad_norm": 0.06602713465690613,
"learning_rate": 4.611650485436894e-05,
"loss": 0.0007,
"step": 1750
},
{
"epoch": 16.01,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 1.6129531860351562,
"eval_runtime": 22.0216,
"eval_samples_per_second": 1.271,
"eval_steps_per_second": 0.636,
"step": 1751
},
{
"epoch": 17.000873786407766,
"grad_norm": 25.33762550354004,
"learning_rate": 4.606256742179073e-05,
"loss": 0.7436,
"step": 1760
},
{
"epoch": 17.001844660194173,
"grad_norm": 0.5531057715415955,
"learning_rate": 4.6008629989212514e-05,
"loss": 0.0023,
"step": 1770
},
{
"epoch": 17.002815533980584,
"grad_norm": 0.004456685855984688,
"learning_rate": 4.59546925566343e-05,
"loss": 0.0071,
"step": 1780
},
{
"epoch": 17.00378640776699,
"grad_norm": 0.019219186156988144,
"learning_rate": 4.59007551240561e-05,
"loss": 0.0049,
"step": 1790
},
{
"epoch": 17.0047572815534,
"grad_norm": 0.00388591387309134,
"learning_rate": 4.584681769147789e-05,
"loss": 0.0077,
"step": 1800
},
{
"epoch": 17.005728155339806,
"grad_norm": 0.006087481044232845,
"learning_rate": 4.579288025889968e-05,
"loss": 0.0043,
"step": 1810
},
{
"epoch": 17.006699029126214,
"grad_norm": 1.9510691165924072,
"learning_rate": 4.5738942826321465e-05,
"loss": 0.0034,
"step": 1820
},
{
"epoch": 17.00766990291262,
"grad_norm": 33.90534210205078,
"learning_rate": 4.568500539374326e-05,
"loss": 0.8541,
"step": 1830
},
{
"epoch": 17.00864077669903,
"grad_norm": 0.07192683219909668,
"learning_rate": 4.5631067961165054e-05,
"loss": 0.0401,
"step": 1840
},
{
"epoch": 17.009611650485436,
"grad_norm": 0.010408276692032814,
"learning_rate": 4.557713052858684e-05,
"loss": 0.0013,
"step": 1850
},
{
"epoch": 17.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.841443657875061,
"eval_runtime": 23.3379,
"eval_samples_per_second": 1.2,
"eval_steps_per_second": 0.6,
"step": 1854
},
{
"epoch": 18.000582524271845,
"grad_norm": 8.814678192138672,
"learning_rate": 4.552319309600863e-05,
"loss": 0.1217,
"step": 1860
},
{
"epoch": 18.001553398058252,
"grad_norm": 0.012438790872693062,
"learning_rate": 4.546925566343042e-05,
"loss": 0.0109,
"step": 1870
},
{
"epoch": 18.00252427184466,
"grad_norm": 0.008642777800559998,
"learning_rate": 4.541531823085222e-05,
"loss": 0.223,
"step": 1880
},
{
"epoch": 18.003495145631067,
"grad_norm": 0.16180367767810822,
"learning_rate": 4.5361380798274005e-05,
"loss": 0.2207,
"step": 1890
},
{
"epoch": 18.004466019417475,
"grad_norm": 0.007616588845849037,
"learning_rate": 4.530744336569579e-05,
"loss": 0.4434,
"step": 1900
},
{
"epoch": 18.005436893203882,
"grad_norm": 0.2378826141357422,
"learning_rate": 4.525350593311759e-05,
"loss": 0.1446,
"step": 1910
},
{
"epoch": 18.006407766990293,
"grad_norm": 0.11261074244976044,
"learning_rate": 4.5199568500539374e-05,
"loss": 0.6324,
"step": 1920
},
{
"epoch": 18.0073786407767,
"grad_norm": 0.07784093916416168,
"learning_rate": 4.514563106796117e-05,
"loss": 0.0289,
"step": 1930
},
{
"epoch": 18.008349514563108,
"grad_norm": 47.64828872680664,
"learning_rate": 4.5091693635382956e-05,
"loss": 0.2385,
"step": 1940
},
{
"epoch": 18.009320388349515,
"grad_norm": 0.006568429991602898,
"learning_rate": 4.503775620280475e-05,
"loss": 0.0358,
"step": 1950
},
{
"epoch": 18.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9031981825828552,
"eval_runtime": 25.6623,
"eval_samples_per_second": 1.091,
"eval_steps_per_second": 0.546,
"step": 1957
},
{
"epoch": 19.00029126213592,
"grad_norm": 0.017097100615501404,
"learning_rate": 4.498381877022654e-05,
"loss": 0.0364,
"step": 1960
},
{
"epoch": 19.00126213592233,
"grad_norm": 0.04976806417107582,
"learning_rate": 4.4929881337648325e-05,
"loss": 0.6286,
"step": 1970
},
{
"epoch": 19.00223300970874,
"grad_norm": 3.1717705726623535,
"learning_rate": 4.487594390507012e-05,
"loss": 0.2046,
"step": 1980
},
{
"epoch": 19.003203883495146,
"grad_norm": 2.156081199645996,
"learning_rate": 4.4822006472491914e-05,
"loss": 0.3576,
"step": 1990
},
{
"epoch": 19.004174757281554,
"grad_norm": 0.04831140488386154,
"learning_rate": 4.47680690399137e-05,
"loss": 0.1893,
"step": 2000
},
{
"epoch": 19.00514563106796,
"grad_norm": 0.048623573035001755,
"learning_rate": 4.471413160733549e-05,
"loss": 0.0239,
"step": 2010
},
{
"epoch": 19.00611650485437,
"grad_norm": 0.008135631680488586,
"learning_rate": 4.466019417475728e-05,
"loss": 0.0703,
"step": 2020
},
{
"epoch": 19.007087378640776,
"grad_norm": 1.1402318477630615,
"learning_rate": 4.460625674217908e-05,
"loss": 0.1388,
"step": 2030
},
{
"epoch": 19.008058252427183,
"grad_norm": 0.004213957116007805,
"learning_rate": 4.4552319309600865e-05,
"loss": 0.0008,
"step": 2040
},
{
"epoch": 19.00902912621359,
"grad_norm": 0.006400398910045624,
"learning_rate": 4.449838187702265e-05,
"loss": 0.2884,
"step": 2050
},
{
"epoch": 19.01,
"grad_norm": 30.39129638671875,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.0955,
"step": 2060
},
{
"epoch": 19.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9296838045120239,
"eval_runtime": 24.9812,
"eval_samples_per_second": 1.121,
"eval_steps_per_second": 0.56,
"step": 2060
},
{
"epoch": 20.000970873786407,
"grad_norm": 0.12476064264774323,
"learning_rate": 4.439050701186624e-05,
"loss": 0.0018,
"step": 2070
},
{
"epoch": 20.001941747572815,
"grad_norm": 0.014465524815022945,
"learning_rate": 4.433656957928803e-05,
"loss": 0.0003,
"step": 2080
},
{
"epoch": 20.002912621359222,
"grad_norm": 0.1726539433002472,
"learning_rate": 4.4282632146709816e-05,
"loss": 0.0178,
"step": 2090
},
{
"epoch": 20.00388349514563,
"grad_norm": 0.011210871860384941,
"learning_rate": 4.422869471413161e-05,
"loss": 0.0084,
"step": 2100
},
{
"epoch": 20.00485436893204,
"grad_norm": 0.03715604916214943,
"learning_rate": 4.4174757281553404e-05,
"loss": 0.0003,
"step": 2110
},
{
"epoch": 20.005825242718448,
"grad_norm": 3.4837474822998047,
"learning_rate": 4.412081984897519e-05,
"loss": 0.3603,
"step": 2120
},
{
"epoch": 20.006796116504855,
"grad_norm": 74.01284790039062,
"learning_rate": 4.406688241639698e-05,
"loss": 0.0891,
"step": 2130
},
{
"epoch": 20.007766990291262,
"grad_norm": 0.02112632617354393,
"learning_rate": 4.4012944983818774e-05,
"loss": 0.0005,
"step": 2140
},
{
"epoch": 20.00873786407767,
"grad_norm": 0.005703276488929987,
"learning_rate": 4.395900755124056e-05,
"loss": 0.1859,
"step": 2150
},
{
"epoch": 20.009708737864077,
"grad_norm": 0.012596944347023964,
"learning_rate": 4.3905070118662355e-05,
"loss": 0.2348,
"step": 2160
},
{
"epoch": 20.01,
"eval_accuracy": 0.75,
"eval_loss": 0.7981438040733337,
"eval_runtime": 22.456,
"eval_samples_per_second": 1.247,
"eval_steps_per_second": 0.623,
"step": 2163
},
{
"epoch": 21.000679611650487,
"grad_norm": 43.48505783081055,
"learning_rate": 4.385113268608414e-05,
"loss": 0.2406,
"step": 2170
},
{
"epoch": 21.001650485436894,
"grad_norm": 0.06352009624242783,
"learning_rate": 4.379719525350594e-05,
"loss": 0.0026,
"step": 2180
},
{
"epoch": 21.0026213592233,
"grad_norm": 0.04188496246933937,
"learning_rate": 4.3743257820927725e-05,
"loss": 0.0952,
"step": 2190
},
{
"epoch": 21.00359223300971,
"grad_norm": 61.67686462402344,
"learning_rate": 4.368932038834951e-05,
"loss": 0.2724,
"step": 2200
},
{
"epoch": 21.004563106796116,
"grad_norm": 0.006463396828621626,
"learning_rate": 4.3635382955771306e-05,
"loss": 0.0227,
"step": 2210
},
{
"epoch": 21.005533980582523,
"grad_norm": 0.2539374828338623,
"learning_rate": 4.35814455231931e-05,
"loss": 0.1527,
"step": 2220
},
{
"epoch": 21.00650485436893,
"grad_norm": 0.0034482753835618496,
"learning_rate": 4.352750809061489e-05,
"loss": 0.0016,
"step": 2230
},
{
"epoch": 21.007475728155338,
"grad_norm": 0.006562869064509869,
"learning_rate": 4.3473570658036676e-05,
"loss": 0.0653,
"step": 2240
},
{
"epoch": 21.00844660194175,
"grad_norm": 0.01731286384165287,
"learning_rate": 4.341963322545847e-05,
"loss": 0.6584,
"step": 2250
},
{
"epoch": 21.009417475728156,
"grad_norm": 9.085536003112793,
"learning_rate": 4.3365695792880264e-05,
"loss": 0.0056,
"step": 2260
},
{
"epoch": 21.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9081144332885742,
"eval_runtime": 23.6824,
"eval_samples_per_second": 1.182,
"eval_steps_per_second": 0.591,
"step": 2266
},
{
"epoch": 22.000388349514562,
"grad_norm": 0.05535823479294777,
"learning_rate": 4.331175836030205e-05,
"loss": 0.023,
"step": 2270
},
{
"epoch": 22.00135922330097,
"grad_norm": 0.13629233837127686,
"learning_rate": 4.325782092772384e-05,
"loss": 0.0011,
"step": 2280
},
{
"epoch": 22.002330097087377,
"grad_norm": 0.004985070787370205,
"learning_rate": 4.3203883495145634e-05,
"loss": 0.0641,
"step": 2290
},
{
"epoch": 22.003300970873788,
"grad_norm": 0.007712341845035553,
"learning_rate": 4.314994606256743e-05,
"loss": 0.0974,
"step": 2300
},
{
"epoch": 22.004271844660195,
"grad_norm": 0.7671927809715271,
"learning_rate": 4.3096008629989215e-05,
"loss": 0.1002,
"step": 2310
},
{
"epoch": 22.005242718446603,
"grad_norm": 0.002474966924637556,
"learning_rate": 4.3042071197411e-05,
"loss": 0.0046,
"step": 2320
},
{
"epoch": 22.00621359223301,
"grad_norm": 0.007814603857696056,
"learning_rate": 4.29881337648328e-05,
"loss": 0.0004,
"step": 2330
},
{
"epoch": 22.007184466019417,
"grad_norm": 0.002644519554451108,
"learning_rate": 4.293419633225459e-05,
"loss": 0.1729,
"step": 2340
},
{
"epoch": 22.008155339805825,
"grad_norm": 0.008584373630583286,
"learning_rate": 4.288025889967638e-05,
"loss": 0.4692,
"step": 2350
},
{
"epoch": 22.009126213592232,
"grad_norm": 0.2353517860174179,
"learning_rate": 4.2826321467098166e-05,
"loss": 0.2562,
"step": 2360
},
{
"epoch": 22.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.7719129920005798,
"eval_runtime": 24.0994,
"eval_samples_per_second": 1.162,
"eval_steps_per_second": 0.581,
"step": 2369
},
{
"epoch": 23.00009708737864,
"grad_norm": 22.082792282104492,
"learning_rate": 4.277238403451996e-05,
"loss": 0.0611,
"step": 2370
},
{
"epoch": 23.00106796116505,
"grad_norm": 0.15429353713989258,
"learning_rate": 4.271844660194175e-05,
"loss": 0.0008,
"step": 2380
},
{
"epoch": 23.002038834951456,
"grad_norm": 0.003812693292275071,
"learning_rate": 4.266450916936354e-05,
"loss": 0.0044,
"step": 2390
},
{
"epoch": 23.003009708737864,
"grad_norm": 0.003263086313381791,
"learning_rate": 4.261057173678533e-05,
"loss": 0.0019,
"step": 2400
},
{
"epoch": 23.00398058252427,
"grad_norm": 0.010048700496554375,
"learning_rate": 4.2556634304207124e-05,
"loss": 0.0008,
"step": 2410
},
{
"epoch": 23.00495145631068,
"grad_norm": 44.61221694946289,
"learning_rate": 4.250269687162891e-05,
"loss": 0.1866,
"step": 2420
},
{
"epoch": 23.005922330097086,
"grad_norm": 0.005064029712229967,
"learning_rate": 4.2448759439050706e-05,
"loss": 0.2712,
"step": 2430
},
{
"epoch": 23.006893203883497,
"grad_norm": 0.003152214689180255,
"learning_rate": 4.2394822006472493e-05,
"loss": 0.046,
"step": 2440
},
{
"epoch": 23.007864077669904,
"grad_norm": 0.005216572899371386,
"learning_rate": 4.234088457389429e-05,
"loss": 0.0003,
"step": 2450
},
{
"epoch": 23.00883495145631,
"grad_norm": 0.23457589745521545,
"learning_rate": 4.2286947141316075e-05,
"loss": 0.001,
"step": 2460
},
{
"epoch": 23.00980582524272,
"grad_norm": 0.014046065509319305,
"learning_rate": 4.223300970873786e-05,
"loss": 0.0007,
"step": 2470
},
{
"epoch": 23.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.2594319581985474,
"eval_runtime": 23.6324,
"eval_samples_per_second": 1.185,
"eval_steps_per_second": 0.592,
"step": 2472
},
{
"epoch": 24.000776699029124,
"grad_norm": 0.004663554951548576,
"learning_rate": 4.217907227615966e-05,
"loss": 0.0011,
"step": 2480
},
{
"epoch": 24.001747572815535,
"grad_norm": 0.0048858774825930595,
"learning_rate": 4.212513484358145e-05,
"loss": 0.1507,
"step": 2490
},
{
"epoch": 24.002718446601943,
"grad_norm": 0.002386435866355896,
"learning_rate": 4.207119741100324e-05,
"loss": 0.0161,
"step": 2500
},
{
"epoch": 24.00368932038835,
"grad_norm": 0.005369072314351797,
"learning_rate": 4.2017259978425026e-05,
"loss": 0.0004,
"step": 2510
},
{
"epoch": 24.004660194174757,
"grad_norm": 42.52135467529297,
"learning_rate": 4.1963322545846814e-05,
"loss": 0.5132,
"step": 2520
},
{
"epoch": 24.005631067961165,
"grad_norm": 0.01311649102717638,
"learning_rate": 4.1909385113268615e-05,
"loss": 0.0005,
"step": 2530
},
{
"epoch": 24.006601941747572,
"grad_norm": 0.010897999629378319,
"learning_rate": 4.18554476806904e-05,
"loss": 0.0004,
"step": 2540
},
{
"epoch": 24.00757281553398,
"grad_norm": 0.007435834035277367,
"learning_rate": 4.180151024811219e-05,
"loss": 0.1525,
"step": 2550
},
{
"epoch": 24.008543689320387,
"grad_norm": 0.014184530824422836,
"learning_rate": 4.1747572815533984e-05,
"loss": 0.1444,
"step": 2560
},
{
"epoch": 24.009514563106798,
"grad_norm": 0.005532704293727875,
"learning_rate": 4.169363538295578e-05,
"loss": 0.5812,
"step": 2570
},
{
"epoch": 24.01,
"eval_accuracy": 0.6785714285714286,
"eval_loss": 1.4030581712722778,
"eval_runtime": 24.1137,
"eval_samples_per_second": 1.161,
"eval_steps_per_second": 0.581,
"step": 2575
},
{
"epoch": 25.000485436893204,
"grad_norm": 0.008427631109952927,
"learning_rate": 4.1639697950377566e-05,
"loss": 0.0714,
"step": 2580
},
{
"epoch": 25.00145631067961,
"grad_norm": 0.0074122268706560135,
"learning_rate": 4.158576051779935e-05,
"loss": 0.4105,
"step": 2590
},
{
"epoch": 25.00242718446602,
"grad_norm": 0.020593173801898956,
"learning_rate": 4.153182308522115e-05,
"loss": 0.3281,
"step": 2600
},
{
"epoch": 25.003398058252426,
"grad_norm": 2.3461203575134277,
"learning_rate": 4.1477885652642935e-05,
"loss": 0.0019,
"step": 2610
},
{
"epoch": 25.004368932038837,
"grad_norm": 0.0027224768418818712,
"learning_rate": 4.142394822006473e-05,
"loss": 0.0028,
"step": 2620
},
{
"epoch": 25.005339805825244,
"grad_norm": 0.002822762355208397,
"learning_rate": 4.137001078748652e-05,
"loss": 0.0002,
"step": 2630
},
{
"epoch": 25.00631067961165,
"grad_norm": 0.006026980467140675,
"learning_rate": 4.131607335490831e-05,
"loss": 0.1026,
"step": 2640
},
{
"epoch": 25.00728155339806,
"grad_norm": 1.4722844362258911,
"learning_rate": 4.12621359223301e-05,
"loss": 0.0961,
"step": 2650
},
{
"epoch": 25.008252427184466,
"grad_norm": 0.015537528321146965,
"learning_rate": 4.120819848975189e-05,
"loss": 0.1645,
"step": 2660
},
{
"epoch": 25.009223300970874,
"grad_norm": 6.544281482696533,
"learning_rate": 4.115426105717368e-05,
"loss": 0.0093,
"step": 2670
},
{
"epoch": 25.01,
"eval_accuracy": 0.75,
"eval_loss": 1.3273574113845825,
"eval_runtime": 23.4972,
"eval_samples_per_second": 1.192,
"eval_steps_per_second": 0.596,
"step": 2678
},
{
"epoch": 26.000194174757283,
"grad_norm": 0.003389996010810137,
"learning_rate": 4.1100323624595475e-05,
"loss": 0.1614,
"step": 2680
},
{
"epoch": 26.00116504854369,
"grad_norm": 0.0065567693673074245,
"learning_rate": 4.104638619201726e-05,
"loss": 0.0969,
"step": 2690
},
{
"epoch": 26.002135922330098,
"grad_norm": 0.006948347669094801,
"learning_rate": 4.099244875943905e-05,
"loss": 0.3507,
"step": 2700
},
{
"epoch": 26.003106796116505,
"grad_norm": 0.003539158497005701,
"learning_rate": 4.0938511326860844e-05,
"loss": 0.0166,
"step": 2710
},
{
"epoch": 26.004077669902912,
"grad_norm": 0.046311162412166595,
"learning_rate": 4.088457389428264e-05,
"loss": 0.0006,
"step": 2720
},
{
"epoch": 26.00504854368932,
"grad_norm": 2.4424049854278564,
"learning_rate": 4.0830636461704426e-05,
"loss": 0.5616,
"step": 2730
},
{
"epoch": 26.006019417475727,
"grad_norm": 0.0019011793192476034,
"learning_rate": 4.077669902912621e-05,
"loss": 0.0587,
"step": 2740
},
{
"epoch": 26.006990291262134,
"grad_norm": 0.007408740930259228,
"learning_rate": 4.0722761596548e-05,
"loss": 0.0411,
"step": 2750
},
{
"epoch": 26.007961165048545,
"grad_norm": 0.0064827739261090755,
"learning_rate": 4.06688241639698e-05,
"loss": 0.1173,
"step": 2760
},
{
"epoch": 26.008932038834953,
"grad_norm": 0.7080045342445374,
"learning_rate": 4.061488673139159e-05,
"loss": 0.0008,
"step": 2770
},
{
"epoch": 26.00990291262136,
"grad_norm": 0.004580841865390539,
"learning_rate": 4.056094929881338e-05,
"loss": 0.0601,
"step": 2780
},
{
"epoch": 26.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.636899471282959,
"eval_runtime": 20.328,
"eval_samples_per_second": 1.377,
"eval_steps_per_second": 0.689,
"step": 2781
},
{
"epoch": 27.000873786407766,
"grad_norm": 0.0028862841427326202,
"learning_rate": 4.0507011866235164e-05,
"loss": 0.2367,
"step": 2790
},
{
"epoch": 27.001844660194173,
"grad_norm": 0.0024308483116328716,
"learning_rate": 4.0453074433656965e-05,
"loss": 0.0223,
"step": 2800
},
{
"epoch": 27.002815533980584,
"grad_norm": 1.5417135953903198,
"learning_rate": 4.039913700107875e-05,
"loss": 0.024,
"step": 2810
},
{
"epoch": 27.00378640776699,
"grad_norm": 0.0034650780726224184,
"learning_rate": 4.034519956850054e-05,
"loss": 0.0018,
"step": 2820
},
{
"epoch": 27.0047572815534,
"grad_norm": 0.015140891075134277,
"learning_rate": 4.029126213592233e-05,
"loss": 0.0426,
"step": 2830
},
{
"epoch": 27.005728155339806,
"grad_norm": 0.003378878114745021,
"learning_rate": 4.023732470334412e-05,
"loss": 0.0012,
"step": 2840
},
{
"epoch": 27.006699029126214,
"grad_norm": 0.00436538876965642,
"learning_rate": 4.0183387270765916e-05,
"loss": 0.0008,
"step": 2850
},
{
"epoch": 27.00766990291262,
"grad_norm": 0.1134125143289566,
"learning_rate": 4.0129449838187704e-05,
"loss": 0.2414,
"step": 2860
},
{
"epoch": 27.00864077669903,
"grad_norm": 0.03811090439558029,
"learning_rate": 4.007551240560949e-05,
"loss": 0.002,
"step": 2870
},
{
"epoch": 27.009611650485436,
"grad_norm": 0.002944665728136897,
"learning_rate": 4.0021574973031286e-05,
"loss": 0.0499,
"step": 2880
},
{
"epoch": 27.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9517186284065247,
"eval_runtime": 20.8069,
"eval_samples_per_second": 1.346,
"eval_steps_per_second": 0.673,
"step": 2884
},
{
"epoch": 28.000582524271845,
"grad_norm": 0.0016248354222625494,
"learning_rate": 3.996763754045308e-05,
"loss": 0.0002,
"step": 2890
},
{
"epoch": 28.001553398058252,
"grad_norm": 0.008152902126312256,
"learning_rate": 3.991370010787487e-05,
"loss": 0.0004,
"step": 2900
},
{
"epoch": 28.00252427184466,
"grad_norm": 0.006454968359321356,
"learning_rate": 3.9859762675296655e-05,
"loss": 0.0341,
"step": 2910
},
{
"epoch": 28.003495145631067,
"grad_norm": 0.05302952602505684,
"learning_rate": 3.980582524271845e-05,
"loss": 0.384,
"step": 2920
},
{
"epoch": 28.004466019417475,
"grad_norm": 0.018912069499492645,
"learning_rate": 3.9751887810140237e-05,
"loss": 0.3019,
"step": 2930
},
{
"epoch": 28.005436893203882,
"grad_norm": 0.004151844419538975,
"learning_rate": 3.969795037756203e-05,
"loss": 0.4579,
"step": 2940
},
{
"epoch": 28.006407766990293,
"grad_norm": 0.12463968992233276,
"learning_rate": 3.964401294498382e-05,
"loss": 0.0462,
"step": 2950
},
{
"epoch": 28.0073786407767,
"grad_norm": 0.0020370371639728546,
"learning_rate": 3.959007551240561e-05,
"loss": 0.0165,
"step": 2960
},
{
"epoch": 28.008349514563108,
"grad_norm": 0.005118839908391237,
"learning_rate": 3.95361380798274e-05,
"loss": 0.2025,
"step": 2970
},
{
"epoch": 28.009320388349515,
"grad_norm": 0.009085087105631828,
"learning_rate": 3.948220064724919e-05,
"loss": 0.0533,
"step": 2980
},
{
"epoch": 28.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.7188411355018616,
"eval_runtime": 23.1485,
"eval_samples_per_second": 1.21,
"eval_steps_per_second": 0.605,
"step": 2987
},
{
"epoch": 29.00029126213592,
"grad_norm": 0.006163634825497866,
"learning_rate": 3.942826321467098e-05,
"loss": 0.6331,
"step": 2990
},
{
"epoch": 29.00126213592233,
"grad_norm": 0.002468701219186187,
"learning_rate": 3.9374325782092776e-05,
"loss": 0.0124,
"step": 3000
},
{
"epoch": 29.00223300970874,
"grad_norm": 51.382347106933594,
"learning_rate": 3.9320388349514564e-05,
"loss": 0.4334,
"step": 3010
},
{
"epoch": 29.003203883495146,
"grad_norm": 0.0053183939307928085,
"learning_rate": 3.926645091693635e-05,
"loss": 0.1082,
"step": 3020
},
{
"epoch": 29.004174757281554,
"grad_norm": 0.044368501752614975,
"learning_rate": 3.9212513484358145e-05,
"loss": 0.1025,
"step": 3030
},
{
"epoch": 29.00514563106796,
"grad_norm": 5.252374649047852,
"learning_rate": 3.915857605177994e-05,
"loss": 0.3806,
"step": 3040
},
{
"epoch": 29.00611650485437,
"grad_norm": 0.004352473188191652,
"learning_rate": 3.910463861920173e-05,
"loss": 0.252,
"step": 3050
},
{
"epoch": 29.007087378640776,
"grad_norm": 89.59004211425781,
"learning_rate": 3.9050701186623515e-05,
"loss": 0.3862,
"step": 3060
},
{
"epoch": 29.008058252427183,
"grad_norm": 0.020154712721705437,
"learning_rate": 3.899676375404531e-05,
"loss": 0.2508,
"step": 3070
},
{
"epoch": 29.00902912621359,
"grad_norm": 0.0032096486538648605,
"learning_rate": 3.89428263214671e-05,
"loss": 0.0016,
"step": 3080
},
{
"epoch": 29.01,
"grad_norm": 25.873830795288086,
"learning_rate": 3.888888888888889e-05,
"loss": 0.4355,
"step": 3090
},
{
"epoch": 29.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.8157760500907898,
"eval_runtime": 22.8775,
"eval_samples_per_second": 1.224,
"eval_steps_per_second": 0.612,
"step": 3090
},
{
"epoch": 30.000970873786407,
"grad_norm": 0.0016481442144140601,
"learning_rate": 3.883495145631068e-05,
"loss": 0.3835,
"step": 3100
},
{
"epoch": 30.001941747572815,
"grad_norm": 0.03597341477870941,
"learning_rate": 3.878101402373247e-05,
"loss": 0.0165,
"step": 3110
},
{
"epoch": 30.002912621359222,
"grad_norm": 0.029974710196256638,
"learning_rate": 3.872707659115427e-05,
"loss": 0.0767,
"step": 3120
},
{
"epoch": 30.00388349514563,
"grad_norm": 0.007997593842446804,
"learning_rate": 3.8673139158576054e-05,
"loss": 0.2748,
"step": 3130
},
{
"epoch": 30.00485436893204,
"grad_norm": 13.744990348815918,
"learning_rate": 3.861920172599784e-05,
"loss": 0.0812,
"step": 3140
},
{
"epoch": 30.005825242718448,
"grad_norm": 12.372878074645996,
"learning_rate": 3.8565264293419636e-05,
"loss": 0.0295,
"step": 3150
},
{
"epoch": 30.006796116504855,
"grad_norm": 0.027433600276708603,
"learning_rate": 3.8511326860841424e-05,
"loss": 0.111,
"step": 3160
},
{
"epoch": 30.007766990291262,
"grad_norm": 0.014044610783457756,
"learning_rate": 3.845738942826322e-05,
"loss": 0.0134,
"step": 3170
},
{
"epoch": 30.00873786407767,
"grad_norm": 47.62761306762695,
"learning_rate": 3.8403451995685005e-05,
"loss": 0.3555,
"step": 3180
},
{
"epoch": 30.009708737864077,
"grad_norm": 3.7249436378479004,
"learning_rate": 3.83495145631068e-05,
"loss": 0.3554,
"step": 3190
},
{
"epoch": 30.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.0249096155166626,
"eval_runtime": 22.5053,
"eval_samples_per_second": 1.244,
"eval_steps_per_second": 0.622,
"step": 3193
},
{
"epoch": 31.000679611650487,
"grad_norm": 0.017235511913895607,
"learning_rate": 3.829557713052859e-05,
"loss": 0.0052,
"step": 3200
},
{
"epoch": 31.001650485436894,
"grad_norm": 0.0296916626393795,
"learning_rate": 3.824163969795038e-05,
"loss": 0.1188,
"step": 3210
},
{
"epoch": 31.0026213592233,
"grad_norm": 0.0033728531561791897,
"learning_rate": 3.818770226537217e-05,
"loss": 0.0472,
"step": 3220
},
{
"epoch": 31.00359223300971,
"grad_norm": 0.0023898575454950333,
"learning_rate": 3.813376483279396e-05,
"loss": 0.0216,
"step": 3230
},
{
"epoch": 31.004563106796116,
"grad_norm": 0.001669862074777484,
"learning_rate": 3.807982740021575e-05,
"loss": 0.0017,
"step": 3240
},
{
"epoch": 31.005533980582523,
"grad_norm": 0.015244018286466599,
"learning_rate": 3.802588996763754e-05,
"loss": 0.4042,
"step": 3250
},
{
"epoch": 31.00650485436893,
"grad_norm": 0.28988704085350037,
"learning_rate": 3.797195253505933e-05,
"loss": 0.1524,
"step": 3260
},
{
"epoch": 31.007475728155338,
"grad_norm": 0.0027148572262376547,
"learning_rate": 3.791801510248113e-05,
"loss": 0.031,
"step": 3270
},
{
"epoch": 31.00844660194175,
"grad_norm": 0.01755693554878235,
"learning_rate": 3.7864077669902914e-05,
"loss": 0.2511,
"step": 3280
},
{
"epoch": 31.009417475728156,
"grad_norm": 0.004436411429196596,
"learning_rate": 3.78101402373247e-05,
"loss": 0.1261,
"step": 3290
},
{
"epoch": 31.01,
"eval_accuracy": 0.75,
"eval_loss": 1.1735308170318604,
"eval_runtime": 23.626,
"eval_samples_per_second": 1.185,
"eval_steps_per_second": 0.593,
"step": 3296
},
{
"epoch": 32.000388349514566,
"grad_norm": 0.0023837178014218807,
"learning_rate": 3.7756202804746496e-05,
"loss": 0.1297,
"step": 3300
},
{
"epoch": 32.00135922330097,
"grad_norm": 23.975601196289062,
"learning_rate": 3.770226537216829e-05,
"loss": 0.3776,
"step": 3310
},
{
"epoch": 32.00233009708738,
"grad_norm": 0.0008600156870670617,
"learning_rate": 3.764832793959008e-05,
"loss": 0.2884,
"step": 3320
},
{
"epoch": 32.00330097087379,
"grad_norm": 28.76761817932129,
"learning_rate": 3.7594390507011865e-05,
"loss": 0.0325,
"step": 3330
},
{
"epoch": 32.004271844660195,
"grad_norm": 10.069014549255371,
"learning_rate": 3.754045307443366e-05,
"loss": 0.025,
"step": 3340
},
{
"epoch": 32.0052427184466,
"grad_norm": 0.003169337520375848,
"learning_rate": 3.7486515641855454e-05,
"loss": 0.1093,
"step": 3350
},
{
"epoch": 32.00621359223301,
"grad_norm": 0.014221856370568275,
"learning_rate": 3.743257820927724e-05,
"loss": 0.0475,
"step": 3360
},
{
"epoch": 32.00718446601942,
"grad_norm": 33.33575439453125,
"learning_rate": 3.737864077669903e-05,
"loss": 0.1803,
"step": 3370
},
{
"epoch": 32.008155339805825,
"grad_norm": 0.0034332312643527985,
"learning_rate": 3.732470334412082e-05,
"loss": 0.178,
"step": 3380
},
{
"epoch": 32.00912621359223,
"grad_norm": 0.026909692212939262,
"learning_rate": 3.727076591154261e-05,
"loss": 0.0006,
"step": 3390
},
{
"epoch": 32.01,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 1.9294798374176025,
"eval_runtime": 24.8267,
"eval_samples_per_second": 1.128,
"eval_steps_per_second": 0.564,
"step": 3399
},
{
"epoch": 33.00009708737864,
"grad_norm": 0.001809462788514793,
"learning_rate": 3.7216828478964405e-05,
"loss": 0.0013,
"step": 3400
},
{
"epoch": 33.00106796116505,
"grad_norm": 0.004172711167484522,
"learning_rate": 3.716289104638619e-05,
"loss": 0.0792,
"step": 3410
},
{
"epoch": 33.002038834951456,
"grad_norm": 0.004106584470719099,
"learning_rate": 3.7108953613807987e-05,
"loss": 0.0006,
"step": 3420
},
{
"epoch": 33.00300970873786,
"grad_norm": 0.0020495557691901922,
"learning_rate": 3.7055016181229774e-05,
"loss": 0.0029,
"step": 3430
},
{
"epoch": 33.00398058252427,
"grad_norm": 29.851099014282227,
"learning_rate": 3.700107874865157e-05,
"loss": 0.2052,
"step": 3440
},
{
"epoch": 33.00495145631068,
"grad_norm": 0.002838430693373084,
"learning_rate": 3.6947141316073356e-05,
"loss": 0.1413,
"step": 3450
},
{
"epoch": 33.005922330097086,
"grad_norm": 0.029243992641568184,
"learning_rate": 3.689320388349515e-05,
"loss": 0.0004,
"step": 3460
},
{
"epoch": 33.00689320388349,
"grad_norm": 0.02334713563323021,
"learning_rate": 3.683926645091694e-05,
"loss": 0.49,
"step": 3470
},
{
"epoch": 33.0078640776699,
"grad_norm": 0.0033061353024095297,
"learning_rate": 3.6785329018338725e-05,
"loss": 0.1975,
"step": 3480
},
{
"epoch": 33.00883495145631,
"grad_norm": 0.002055556047707796,
"learning_rate": 3.673139158576052e-05,
"loss": 0.036,
"step": 3490
},
{
"epoch": 33.009805825242715,
"grad_norm": 0.0014392153825610876,
"learning_rate": 3.6677454153182314e-05,
"loss": 0.649,
"step": 3500
},
{
"epoch": 33.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.6239737868309021,
"eval_runtime": 22.3594,
"eval_samples_per_second": 1.252,
"eval_steps_per_second": 0.626,
"step": 3502
},
{
"epoch": 34.000776699029124,
"grad_norm": 0.28017091751098633,
"learning_rate": 3.66235167206041e-05,
"loss": 0.1429,
"step": 3510
},
{
"epoch": 34.00174757281553,
"grad_norm": 0.018015265464782715,
"learning_rate": 3.656957928802589e-05,
"loss": 0.0991,
"step": 3520
},
{
"epoch": 34.00271844660194,
"grad_norm": 0.010815299116075039,
"learning_rate": 3.651564185544768e-05,
"loss": 0.0052,
"step": 3530
},
{
"epoch": 34.00368932038835,
"grad_norm": 0.0025419536978006363,
"learning_rate": 3.646170442286948e-05,
"loss": 0.0005,
"step": 3540
},
{
"epoch": 34.004660194174754,
"grad_norm": 0.0013998536160215735,
"learning_rate": 3.6407766990291265e-05,
"loss": 0.0003,
"step": 3550
},
{
"epoch": 34.00563106796117,
"grad_norm": 0.007435066159814596,
"learning_rate": 3.635382955771305e-05,
"loss": 0.0137,
"step": 3560
},
{
"epoch": 34.006601941747576,
"grad_norm": 0.0021152186673134565,
"learning_rate": 3.6299892125134846e-05,
"loss": 0.0893,
"step": 3570
},
{
"epoch": 34.00757281553398,
"grad_norm": 6.415030002593994,
"learning_rate": 3.624595469255664e-05,
"loss": 0.2329,
"step": 3580
},
{
"epoch": 34.00854368932039,
"grad_norm": 0.002037628088146448,
"learning_rate": 3.619201725997843e-05,
"loss": 0.114,
"step": 3590
},
{
"epoch": 34.0095145631068,
"grad_norm": 0.002441792283207178,
"learning_rate": 3.6138079827400216e-05,
"loss": 0.0635,
"step": 3600
},
{
"epoch": 34.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 0.9778858423233032,
"eval_runtime": 22.8088,
"eval_samples_per_second": 1.228,
"eval_steps_per_second": 0.614,
"step": 3605
},
{
"epoch": 35.00048543689321,
"grad_norm": 0.5903840661048889,
"learning_rate": 3.608414239482201e-05,
"loss": 0.0009,
"step": 3610
},
{
"epoch": 35.001456310679615,
"grad_norm": 0.0017763852374628186,
"learning_rate": 3.60302049622438e-05,
"loss": 0.0742,
"step": 3620
},
{
"epoch": 35.00242718446602,
"grad_norm": 0.010188854299485683,
"learning_rate": 3.597626752966559e-05,
"loss": 0.02,
"step": 3630
},
{
"epoch": 35.00339805825243,
"grad_norm": 39.74284744262695,
"learning_rate": 3.592233009708738e-05,
"loss": 0.2593,
"step": 3640
},
{
"epoch": 35.00436893203884,
"grad_norm": 33.84955978393555,
"learning_rate": 3.5868392664509174e-05,
"loss": 0.462,
"step": 3650
},
{
"epoch": 35.005339805825244,
"grad_norm": 0.0023876363411545753,
"learning_rate": 3.581445523193096e-05,
"loss": 0.0004,
"step": 3660
},
{
"epoch": 35.00631067961165,
"grad_norm": 0.0012121149338781834,
"learning_rate": 3.5760517799352755e-05,
"loss": 0.0084,
"step": 3670
},
{
"epoch": 35.00728155339806,
"grad_norm": 0.002630241448059678,
"learning_rate": 3.570658036677454e-05,
"loss": 0.0949,
"step": 3680
},
{
"epoch": 35.008252427184466,
"grad_norm": 0.0016603067051619291,
"learning_rate": 3.565264293419634e-05,
"loss": 0.1554,
"step": 3690
},
{
"epoch": 35.00922330097087,
"grad_norm": 0.056095294654369354,
"learning_rate": 3.5598705501618125e-05,
"loss": 0.0006,
"step": 3700
},
{
"epoch": 35.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 1.2439024448394775,
"eval_runtime": 23.3085,
"eval_samples_per_second": 1.201,
"eval_steps_per_second": 0.601,
"step": 3708
},
{
"epoch": 36.00019417475728,
"grad_norm": 64.94233703613281,
"learning_rate": 3.554476806903991e-05,
"loss": 0.1994,
"step": 3710
},
{
"epoch": 36.00116504854369,
"grad_norm": 0.04842671379446983,
"learning_rate": 3.5490830636461706e-05,
"loss": 0.0057,
"step": 3720
},
{
"epoch": 36.0021359223301,
"grad_norm": 0.009521760046482086,
"learning_rate": 3.54368932038835e-05,
"loss": 0.1354,
"step": 3730
},
{
"epoch": 36.003106796116505,
"grad_norm": 0.001494877622462809,
"learning_rate": 3.538295577130529e-05,
"loss": 0.0003,
"step": 3740
},
{
"epoch": 36.00407766990291,
"grad_norm": 0.005222668405622244,
"learning_rate": 3.5329018338727076e-05,
"loss": 0.4186,
"step": 3750
},
{
"epoch": 36.00504854368932,
"grad_norm": 0.0036867093294858932,
"learning_rate": 3.527508090614886e-05,
"loss": 0.1241,
"step": 3760
},
{
"epoch": 36.00601941747573,
"grad_norm": 0.027494311332702637,
"learning_rate": 3.5221143473570664e-05,
"loss": 0.3695,
"step": 3770
},
{
"epoch": 36.006990291262134,
"grad_norm": 20.542978286743164,
"learning_rate": 3.516720604099245e-05,
"loss": 0.1056,
"step": 3780
},
{
"epoch": 36.00796116504854,
"grad_norm": 0.0017354681622236967,
"learning_rate": 3.511326860841424e-05,
"loss": 0.0935,
"step": 3790
},
{
"epoch": 36.00893203883495,
"grad_norm": 4.373658180236816,
"learning_rate": 3.505933117583603e-05,
"loss": 0.0067,
"step": 3800
},
{
"epoch": 36.00990291262136,
"grad_norm": 0.0025413844268769026,
"learning_rate": 3.500539374325783e-05,
"loss": 0.2745,
"step": 3810
},
{
"epoch": 36.01,
"eval_accuracy": 0.75,
"eval_loss": 1.4499845504760742,
"eval_runtime": 23.865,
"eval_samples_per_second": 1.173,
"eval_steps_per_second": 0.587,
"step": 3811
},
{
"epoch": 37.000873786407766,
"grad_norm": 0.002057610312476754,
"learning_rate": 3.4951456310679615e-05,
"loss": 0.1987,
"step": 3820
},
{
"epoch": 37.00184466019417,
"grad_norm": 0.002994287759065628,
"learning_rate": 3.48975188781014e-05,
"loss": 0.0612,
"step": 3830
},
{
"epoch": 37.00281553398058,
"grad_norm": 0.009886479936540127,
"learning_rate": 3.484358144552319e-05,
"loss": 0.0002,
"step": 3840
},
{
"epoch": 37.00378640776699,
"grad_norm": 0.005265057552605867,
"learning_rate": 3.4789644012944984e-05,
"loss": 0.0028,
"step": 3850
},
{
"epoch": 37.004757281553395,
"grad_norm": 0.09004159271717072,
"learning_rate": 3.473570658036678e-05,
"loss": 0.289,
"step": 3860
},
{
"epoch": 37.0057281553398,
"grad_norm": 0.027610071003437042,
"learning_rate": 3.4681769147788566e-05,
"loss": 0.2624,
"step": 3870
},
{
"epoch": 37.00669902912621,
"grad_norm": 0.003753015771508217,
"learning_rate": 3.4627831715210354e-05,
"loss": 0.0016,
"step": 3880
},
{
"epoch": 37.007669902912625,
"grad_norm": 0.021402668207883835,
"learning_rate": 3.457389428263215e-05,
"loss": 0.247,
"step": 3890
},
{
"epoch": 37.00864077669903,
"grad_norm": 0.0024451317731291056,
"learning_rate": 3.451995685005394e-05,
"loss": 0.4548,
"step": 3900
},
{
"epoch": 37.00961165048544,
"grad_norm": 0.0038541662506759167,
"learning_rate": 3.446601941747573e-05,
"loss": 0.4486,
"step": 3910
},
{
"epoch": 37.01,
"eval_accuracy": 0.8928571428571429,
"eval_loss": 0.6353511214256287,
"eval_runtime": 22.4342,
"eval_samples_per_second": 1.248,
"eval_steps_per_second": 0.624,
"step": 3914
},
{
"epoch": 38.00058252427184,
"grad_norm": 0.005073450040072203,
"learning_rate": 3.4412081984897524e-05,
"loss": 0.1806,
"step": 3920
},
{
"epoch": 38.00155339805825,
"grad_norm": 0.0008047996670939028,
"learning_rate": 3.435814455231931e-05,
"loss": 0.0007,
"step": 3930
},
{
"epoch": 38.00252427184466,
"grad_norm": 0.0032085394486784935,
"learning_rate": 3.43042071197411e-05,
"loss": 0.0022,
"step": 3940
},
{
"epoch": 38.00349514563107,
"grad_norm": 2.118870973587036,
"learning_rate": 3.425026968716289e-05,
"loss": 0.0019,
"step": 3950
},
{
"epoch": 38.00446601941748,
"grad_norm": 2.797478199005127,
"learning_rate": 3.419633225458469e-05,
"loss": 0.0072,
"step": 3960
},
{
"epoch": 38.005436893203886,
"grad_norm": 1.8579610586166382,
"learning_rate": 3.4142394822006475e-05,
"loss": 0.2237,
"step": 3970
},
{
"epoch": 38.00640776699029,
"grad_norm": 0.015792952850461006,
"learning_rate": 3.408845738942826e-05,
"loss": 0.7753,
"step": 3980
},
{
"epoch": 38.0073786407767,
"grad_norm": 0.11559689044952393,
"learning_rate": 3.403451995685006e-05,
"loss": 0.2666,
"step": 3990
},
{
"epoch": 38.00834951456311,
"grad_norm": 0.0116598354652524,
"learning_rate": 3.398058252427185e-05,
"loss": 0.1389,
"step": 4000
},
{
"epoch": 38.009320388349515,
"grad_norm": 0.002214794047176838,
"learning_rate": 3.392664509169364e-05,
"loss": 0.0016,
"step": 4010
},
{
"epoch": 38.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.1773262023925781,
"eval_runtime": 23.0373,
"eval_samples_per_second": 1.215,
"eval_steps_per_second": 0.608,
"step": 4017
},
{
"epoch": 39.000291262135924,
"grad_norm": 0.043513890355825424,
"learning_rate": 3.3872707659115426e-05,
"loss": 0.3503,
"step": 4020
},
{
"epoch": 39.00126213592233,
"grad_norm": 0.0016833103727549314,
"learning_rate": 3.3818770226537214e-05,
"loss": 0.0069,
"step": 4030
},
{
"epoch": 39.00223300970874,
"grad_norm": 0.00472614960744977,
"learning_rate": 3.3764832793959015e-05,
"loss": 0.0083,
"step": 4040
},
{
"epoch": 39.003203883495146,
"grad_norm": 0.003099585883319378,
"learning_rate": 3.37108953613808e-05,
"loss": 0.2343,
"step": 4050
},
{
"epoch": 39.004174757281554,
"grad_norm": 0.0013699843548238277,
"learning_rate": 3.365695792880259e-05,
"loss": 0.2691,
"step": 4060
},
{
"epoch": 39.00514563106796,
"grad_norm": 0.021296612918376923,
"learning_rate": 3.360302049622438e-05,
"loss": 0.0003,
"step": 4070
},
{
"epoch": 39.00611650485437,
"grad_norm": 0.0008244297350756824,
"learning_rate": 3.354908306364617e-05,
"loss": 0.0004,
"step": 4080
},
{
"epoch": 39.007087378640776,
"grad_norm": 0.0007789231603965163,
"learning_rate": 3.3495145631067966e-05,
"loss": 0.0023,
"step": 4090
},
{
"epoch": 39.00805825242718,
"grad_norm": 0.0026045972481369972,
"learning_rate": 3.344120819848975e-05,
"loss": 0.1012,
"step": 4100
},
{
"epoch": 39.00902912621359,
"grad_norm": 0.007781523279845715,
"learning_rate": 3.338727076591154e-05,
"loss": 0.0002,
"step": 4110
},
{
"epoch": 39.01,
"grad_norm": 0.003086633747443557,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.2667,
"step": 4120
},
{
"epoch": 39.01,
"eval_accuracy": 0.75,
"eval_loss": 1.2261667251586914,
"eval_runtime": 23.171,
"eval_samples_per_second": 1.208,
"eval_steps_per_second": 0.604,
"step": 4120
},
{
"epoch": 40.00097087378641,
"grad_norm": 0.0011974448570981622,
"learning_rate": 3.327939590075513e-05,
"loss": 0.3577,
"step": 4130
},
{
"epoch": 40.001941747572815,
"grad_norm": 0.0012991767143830657,
"learning_rate": 3.322545846817692e-05,
"loss": 0.1698,
"step": 4140
},
{
"epoch": 40.00291262135922,
"grad_norm": 0.0018923853058367968,
"learning_rate": 3.3171521035598704e-05,
"loss": 0.003,
"step": 4150
},
{
"epoch": 40.00388349514563,
"grad_norm": 0.032640498131513596,
"learning_rate": 3.31175836030205e-05,
"loss": 0.0241,
"step": 4160
},
{
"epoch": 40.00485436893204,
"grad_norm": 0.001086374861188233,
"learning_rate": 3.3063646170442286e-05,
"loss": 0.0104,
"step": 4170
},
{
"epoch": 40.005825242718444,
"grad_norm": 0.03813697770237923,
"learning_rate": 3.300970873786408e-05,
"loss": 0.0665,
"step": 4180
},
{
"epoch": 40.00679611650485,
"grad_norm": 0.001190397422760725,
"learning_rate": 3.295577130528587e-05,
"loss": 0.1932,
"step": 4190
},
{
"epoch": 40.00776699029126,
"grad_norm": 0.0007828500238247216,
"learning_rate": 3.290183387270766e-05,
"loss": 0.0001,
"step": 4200
},
{
"epoch": 40.00873786407767,
"grad_norm": 0.0014887334546074271,
"learning_rate": 3.284789644012945e-05,
"loss": 0.0676,
"step": 4210
},
{
"epoch": 40.00970873786408,
"grad_norm": 0.001428833231329918,
"learning_rate": 3.2793959007551244e-05,
"loss": 0.0323,
"step": 4220
},
{
"epoch": 40.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.084999680519104,
"eval_runtime": 25.475,
"eval_samples_per_second": 1.099,
"eval_steps_per_second": 0.55,
"step": 4223
},
{
"epoch": 41.00067961165048,
"grad_norm": 0.0026885566767305136,
"learning_rate": 3.274002157497303e-05,
"loss": 0.361,
"step": 4230
},
{
"epoch": 41.00165048543689,
"grad_norm": 0.000607341411523521,
"learning_rate": 3.2686084142394826e-05,
"loss": 0.0327,
"step": 4240
},
{
"epoch": 41.0026213592233,
"grad_norm": 0.0008909976459108293,
"learning_rate": 3.263214670981661e-05,
"loss": 0.0152,
"step": 4250
},
{
"epoch": 41.00359223300971,
"grad_norm": 0.006362698040902615,
"learning_rate": 3.25782092772384e-05,
"loss": 0.2218,
"step": 4260
},
{
"epoch": 41.00456310679612,
"grad_norm": 0.003075315849855542,
"learning_rate": 3.2524271844660195e-05,
"loss": 0.0001,
"step": 4270
},
{
"epoch": 41.00553398058253,
"grad_norm": 0.36094582080841064,
"learning_rate": 3.247033441208199e-05,
"loss": 0.0012,
"step": 4280
},
{
"epoch": 41.006504854368934,
"grad_norm": 0.008738203905522823,
"learning_rate": 3.2416396979503777e-05,
"loss": 0.0027,
"step": 4290
},
{
"epoch": 41.00747572815534,
"grad_norm": 0.04587085172533989,
"learning_rate": 3.2362459546925564e-05,
"loss": 0.1983,
"step": 4300
},
{
"epoch": 41.00844660194175,
"grad_norm": 8.648506164550781,
"learning_rate": 3.230852211434736e-05,
"loss": 0.2518,
"step": 4310
},
{
"epoch": 41.009417475728156,
"grad_norm": 0.02086603082716465,
"learning_rate": 3.225458468176915e-05,
"loss": 0.0003,
"step": 4320
},
{
"epoch": 41.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.015878677368164,
"eval_runtime": 23.5678,
"eval_samples_per_second": 1.188,
"eval_steps_per_second": 0.594,
"step": 4326
},
{
"epoch": 42.000388349514566,
"grad_norm": 0.22889237105846405,
"learning_rate": 3.220064724919094e-05,
"loss": 0.3213,
"step": 4330
},
{
"epoch": 42.00135922330097,
"grad_norm": 0.0015255825128406286,
"learning_rate": 3.214670981661273e-05,
"loss": 0.0002,
"step": 4340
},
{
"epoch": 42.00233009708738,
"grad_norm": 0.0007126994314603508,
"learning_rate": 3.209277238403452e-05,
"loss": 0.2395,
"step": 4350
},
{
"epoch": 42.00330097087379,
"grad_norm": 0.0013461983762681484,
"learning_rate": 3.2038834951456316e-05,
"loss": 0.0033,
"step": 4360
},
{
"epoch": 42.004271844660195,
"grad_norm": 0.0021904853638261557,
"learning_rate": 3.1984897518878104e-05,
"loss": 0.0611,
"step": 4370
},
{
"epoch": 42.0052427184466,
"grad_norm": 1.7471833229064941,
"learning_rate": 3.193096008629989e-05,
"loss": 0.0031,
"step": 4380
},
{
"epoch": 42.00621359223301,
"grad_norm": 4.915556907653809,
"learning_rate": 3.1877022653721685e-05,
"loss": 0.005,
"step": 4390
},
{
"epoch": 42.00718446601942,
"grad_norm": 61.77727508544922,
"learning_rate": 3.182308522114347e-05,
"loss": 0.4333,
"step": 4400
},
{
"epoch": 42.008155339805825,
"grad_norm": 0.0011002025566995144,
"learning_rate": 3.176914778856527e-05,
"loss": 0.1166,
"step": 4410
},
{
"epoch": 42.00912621359223,
"grad_norm": 0.0014632418751716614,
"learning_rate": 3.1715210355987055e-05,
"loss": 0.0743,
"step": 4420
},
{
"epoch": 42.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.9593519568443298,
"eval_runtime": 23.9476,
"eval_samples_per_second": 1.169,
"eval_steps_per_second": 0.585,
"step": 4429
},
{
"epoch": 43.00009708737864,
"grad_norm": 0.0036053850781172514,
"learning_rate": 3.166127292340885e-05,
"loss": 0.0007,
"step": 4430
},
{
"epoch": 43.00106796116505,
"grad_norm": 0.009421459399163723,
"learning_rate": 3.1607335490830636e-05,
"loss": 0.0001,
"step": 4440
},
{
"epoch": 43.002038834951456,
"grad_norm": 0.0017767982790246606,
"learning_rate": 3.155339805825243e-05,
"loss": 0.0033,
"step": 4450
},
{
"epoch": 43.00300970873786,
"grad_norm": 0.0010528108105063438,
"learning_rate": 3.149946062567422e-05,
"loss": 0.0079,
"step": 4460
},
{
"epoch": 43.00398058252427,
"grad_norm": 0.0015437144320458174,
"learning_rate": 3.144552319309601e-05,
"loss": 0.0105,
"step": 4470
},
{
"epoch": 43.00495145631068,
"grad_norm": 0.004825149197131395,
"learning_rate": 3.13915857605178e-05,
"loss": 0.0001,
"step": 4480
},
{
"epoch": 43.005922330097086,
"grad_norm": 0.0013643408892676234,
"learning_rate": 3.133764832793959e-05,
"loss": 0.0154,
"step": 4490
},
{
"epoch": 43.00689320388349,
"grad_norm": 0.004657248966395855,
"learning_rate": 3.128371089536138e-05,
"loss": 0.3757,
"step": 4500
},
{
"epoch": 43.0078640776699,
"grad_norm": 0.0017380419885739684,
"learning_rate": 3.1229773462783176e-05,
"loss": 0.0004,
"step": 4510
},
{
"epoch": 43.00883495145631,
"grad_norm": 0.002304408699274063,
"learning_rate": 3.1175836030204964e-05,
"loss": 0.2601,
"step": 4520
},
{
"epoch": 43.009805825242715,
"grad_norm": 0.004452872090041637,
"learning_rate": 3.112189859762675e-05,
"loss": 0.0949,
"step": 4530
},
{
"epoch": 43.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.130449891090393,
"eval_runtime": 21.6097,
"eval_samples_per_second": 1.296,
"eval_steps_per_second": 0.648,
"step": 4532
},
{
"epoch": 44.000776699029124,
"grad_norm": 2.61712384223938,
"learning_rate": 3.1067961165048545e-05,
"loss": 0.2651,
"step": 4540
},
{
"epoch": 44.00174757281553,
"grad_norm": 0.0021068707574158907,
"learning_rate": 3.101402373247034e-05,
"loss": 0.4076,
"step": 4550
},
{
"epoch": 44.00271844660194,
"grad_norm": 2.136120319366455,
"learning_rate": 3.096008629989213e-05,
"loss": 0.034,
"step": 4560
},
{
"epoch": 44.00368932038835,
"grad_norm": 0.01782909780740738,
"learning_rate": 3.0906148867313915e-05,
"loss": 0.0002,
"step": 4570
},
{
"epoch": 44.004660194174754,
"grad_norm": 0.0028514356818050146,
"learning_rate": 3.085221143473571e-05,
"loss": 0.0002,
"step": 4580
},
{
"epoch": 44.00563106796117,
"grad_norm": 0.0018266479019075632,
"learning_rate": 3.07982740021575e-05,
"loss": 0.293,
"step": 4590
},
{
"epoch": 44.006601941747576,
"grad_norm": 0.002658822340890765,
"learning_rate": 3.074433656957929e-05,
"loss": 0.2078,
"step": 4600
},
{
"epoch": 44.00757281553398,
"grad_norm": 0.6835060715675354,
"learning_rate": 3.069039913700108e-05,
"loss": 0.0017,
"step": 4610
},
{
"epoch": 44.00854368932039,
"grad_norm": 0.07813756167888641,
"learning_rate": 3.063646170442287e-05,
"loss": 0.3032,
"step": 4620
},
{
"epoch": 44.0095145631068,
"grad_norm": 0.005411970894783735,
"learning_rate": 3.058252427184466e-05,
"loss": 0.0047,
"step": 4630
},
{
"epoch": 44.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.2497036457061768,
"eval_runtime": 24.1631,
"eval_samples_per_second": 1.159,
"eval_steps_per_second": 0.579,
"step": 4635
},
{
"epoch": 45.00048543689321,
"grad_norm": 0.008571690879762173,
"learning_rate": 3.0528586839266454e-05,
"loss": 0.0001,
"step": 4640
},
{
"epoch": 45.001456310679615,
"grad_norm": 0.0012251570587977767,
"learning_rate": 3.0474649406688245e-05,
"loss": 0.0001,
"step": 4650
},
{
"epoch": 45.00242718446602,
"grad_norm": 0.0016704073641449213,
"learning_rate": 3.0420711974110033e-05,
"loss": 0.0009,
"step": 4660
},
{
"epoch": 45.00339805825243,
"grad_norm": 0.0016477880999445915,
"learning_rate": 3.0366774541531823e-05,
"loss": 0.0004,
"step": 4670
},
{
"epoch": 45.00436893203884,
"grad_norm": 0.002065768465399742,
"learning_rate": 3.0312837108953618e-05,
"loss": 0.0178,
"step": 4680
},
{
"epoch": 45.005339805825244,
"grad_norm": 0.0015899483114480972,
"learning_rate": 3.025889967637541e-05,
"loss": 0.3922,
"step": 4690
},
{
"epoch": 45.00631067961165,
"grad_norm": 0.0013735065003857017,
"learning_rate": 3.0204962243797196e-05,
"loss": 0.034,
"step": 4700
},
{
"epoch": 45.00728155339806,
"grad_norm": 0.0015412523644044995,
"learning_rate": 3.0151024811218987e-05,
"loss": 0.0121,
"step": 4710
},
{
"epoch": 45.008252427184466,
"grad_norm": 0.0010480155469849706,
"learning_rate": 3.0097087378640774e-05,
"loss": 0.0006,
"step": 4720
},
{
"epoch": 45.00922330097087,
"grad_norm": 0.010585146956145763,
"learning_rate": 3.0043149946062572e-05,
"loss": 0.2398,
"step": 4730
},
{
"epoch": 45.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.9287577867507935,
"eval_runtime": 23.5551,
"eval_samples_per_second": 1.189,
"eval_steps_per_second": 0.594,
"step": 4738
},
{
"epoch": 46.00019417475728,
"grad_norm": 0.0012301468523219228,
"learning_rate": 2.998921251348436e-05,
"loss": 0.0287,
"step": 4740
},
{
"epoch": 46.00116504854369,
"grad_norm": 0.0015091183595359325,
"learning_rate": 2.993527508090615e-05,
"loss": 0.0005,
"step": 4750
},
{
"epoch": 46.0021359223301,
"grad_norm": 5.117990493774414,
"learning_rate": 2.9881337648327938e-05,
"loss": 0.0107,
"step": 4760
},
{
"epoch": 46.003106796116505,
"grad_norm": 0.0007064930396154523,
"learning_rate": 2.982740021574973e-05,
"loss": 0.0002,
"step": 4770
},
{
"epoch": 46.00407766990291,
"grad_norm": 0.00104942184407264,
"learning_rate": 2.9773462783171523e-05,
"loss": 0.0185,
"step": 4780
},
{
"epoch": 46.00504854368932,
"grad_norm": 0.001231582835316658,
"learning_rate": 2.9719525350593314e-05,
"loss": 0.0002,
"step": 4790
},
{
"epoch": 46.00601941747573,
"grad_norm": 26.493947982788086,
"learning_rate": 2.96655879180151e-05,
"loss": 0.1135,
"step": 4800
},
{
"epoch": 46.006990291262134,
"grad_norm": 0.0015699361683800817,
"learning_rate": 2.9611650485436892e-05,
"loss": 0.0181,
"step": 4810
},
{
"epoch": 46.00796116504854,
"grad_norm": 0.004447311628609896,
"learning_rate": 2.9557713052858687e-05,
"loss": 0.2114,
"step": 4820
},
{
"epoch": 46.00893203883495,
"grad_norm": 0.00241004372946918,
"learning_rate": 2.9503775620280478e-05,
"loss": 0.0001,
"step": 4830
},
{
"epoch": 46.00990291262136,
"grad_norm": 0.0018498291028663516,
"learning_rate": 2.9449838187702265e-05,
"loss": 0.0002,
"step": 4840
},
{
"epoch": 46.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.780640721321106,
"eval_runtime": 23.3508,
"eval_samples_per_second": 1.199,
"eval_steps_per_second": 0.6,
"step": 4841
},
{
"epoch": 47.000873786407766,
"grad_norm": 0.001025457284413278,
"learning_rate": 2.9395900755124056e-05,
"loss": 0.0386,
"step": 4850
},
{
"epoch": 47.00184466019417,
"grad_norm": 0.0006625480600632727,
"learning_rate": 2.9341963322545847e-05,
"loss": 0.0004,
"step": 4860
},
{
"epoch": 47.00281553398058,
"grad_norm": 0.0031967784743756056,
"learning_rate": 2.928802588996764e-05,
"loss": 0.0513,
"step": 4870
},
{
"epoch": 47.00378640776699,
"grad_norm": 0.0007817107252776623,
"learning_rate": 2.923408845738943e-05,
"loss": 0.0008,
"step": 4880
},
{
"epoch": 47.004757281553395,
"grad_norm": 0.0005676000728271902,
"learning_rate": 2.918015102481122e-05,
"loss": 0.1106,
"step": 4890
},
{
"epoch": 47.0057281553398,
"grad_norm": 1.6445106267929077,
"learning_rate": 2.912621359223301e-05,
"loss": 0.0238,
"step": 4900
},
{
"epoch": 47.00669902912621,
"grad_norm": 0.001159881940111518,
"learning_rate": 2.9072276159654805e-05,
"loss": 0.0027,
"step": 4910
},
{
"epoch": 47.007669902912625,
"grad_norm": 0.0012647173134610057,
"learning_rate": 2.9018338727076596e-05,
"loss": 0.2917,
"step": 4920
},
{
"epoch": 47.00864077669903,
"grad_norm": 0.0034010119270533323,
"learning_rate": 2.8964401294498383e-05,
"loss": 0.0002,
"step": 4930
},
{
"epoch": 47.00961165048544,
"grad_norm": 0.0016616523498669267,
"learning_rate": 2.8910463861920174e-05,
"loss": 0.1631,
"step": 4940
},
{
"epoch": 47.01,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 1.311640739440918,
"eval_runtime": 24.0191,
"eval_samples_per_second": 1.166,
"eval_steps_per_second": 0.583,
"step": 4944
},
{
"epoch": 48.00058252427184,
"grad_norm": 0.000713896646630019,
"learning_rate": 2.885652642934196e-05,
"loss": 0.1625,
"step": 4950
},
{
"epoch": 48.00155339805825,
"grad_norm": 26.299646377563477,
"learning_rate": 2.880258899676376e-05,
"loss": 0.034,
"step": 4960
},
{
"epoch": 48.00252427184466,
"grad_norm": 0.0013544855173677206,
"learning_rate": 2.8748651564185547e-05,
"loss": 0.0098,
"step": 4970
},
{
"epoch": 48.00349514563107,
"grad_norm": 0.013510116375982761,
"learning_rate": 2.8694714131607337e-05,
"loss": 0.0388,
"step": 4980
},
{
"epoch": 48.00446601941748,
"grad_norm": 0.00144234171602875,
"learning_rate": 2.8640776699029125e-05,
"loss": 0.004,
"step": 4990
},
{
"epoch": 48.005436893203886,
"grad_norm": 0.7181257009506226,
"learning_rate": 2.8586839266450923e-05,
"loss": 0.0299,
"step": 5000
},
{
"epoch": 48.00640776699029,
"grad_norm": 0.0009681765804998577,
"learning_rate": 2.853290183387271e-05,
"loss": 0.0001,
"step": 5010
},
{
"epoch": 48.0073786407767,
"grad_norm": 0.005896919872611761,
"learning_rate": 2.84789644012945e-05,
"loss": 0.4035,
"step": 5020
},
{
"epoch": 48.00834951456311,
"grad_norm": 8.744750022888184,
"learning_rate": 2.842502696871629e-05,
"loss": 0.0138,
"step": 5030
},
{
"epoch": 48.009320388349515,
"grad_norm": 0.0005152505473233759,
"learning_rate": 2.837108953613808e-05,
"loss": 0.0483,
"step": 5040
},
{
"epoch": 48.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.9696491956710815,
"eval_runtime": 23.5248,
"eval_samples_per_second": 1.19,
"eval_steps_per_second": 0.595,
"step": 5047
},
{
"epoch": 49.000291262135924,
"grad_norm": 0.27486586570739746,
"learning_rate": 2.8317152103559874e-05,
"loss": 0.0006,
"step": 5050
},
{
"epoch": 49.00126213592233,
"grad_norm": 0.0013216667575761676,
"learning_rate": 2.8263214670981665e-05,
"loss": 0.4652,
"step": 5060
},
{
"epoch": 49.00223300970874,
"grad_norm": 0.0021054185926914215,
"learning_rate": 2.8209277238403452e-05,
"loss": 0.0001,
"step": 5070
},
{
"epoch": 49.003203883495146,
"grad_norm": 0.01227721106261015,
"learning_rate": 2.8155339805825243e-05,
"loss": 0.0527,
"step": 5080
},
{
"epoch": 49.004174757281554,
"grad_norm": 7.8854780197143555,
"learning_rate": 2.8101402373247034e-05,
"loss": 0.015,
"step": 5090
},
{
"epoch": 49.00514563106796,
"grad_norm": 0.0035126330330967903,
"learning_rate": 2.8047464940668828e-05,
"loss": 0.1389,
"step": 5100
},
{
"epoch": 49.00611650485437,
"grad_norm": 0.0020686753559857607,
"learning_rate": 2.7993527508090616e-05,
"loss": 0.0001,
"step": 5110
},
{
"epoch": 49.007087378640776,
"grad_norm": 0.0006003176677040756,
"learning_rate": 2.7939590075512406e-05,
"loss": 0.0001,
"step": 5120
},
{
"epoch": 49.00805825242718,
"grad_norm": 0.0008518914110027254,
"learning_rate": 2.7885652642934197e-05,
"loss": 0.0001,
"step": 5130
},
{
"epoch": 49.00902912621359,
"grad_norm": 0.0007975481566973031,
"learning_rate": 2.783171521035599e-05,
"loss": 0.0001,
"step": 5140
},
{
"epoch": 49.01,
"grad_norm": 0.0018201102502644062,
"learning_rate": 2.777777777777778e-05,
"loss": 0.0001,
"step": 5150
},
{
"epoch": 49.01,
"eval_accuracy": 0.8928571428571429,
"eval_loss": 0.8070898652076721,
"eval_runtime": 23.6805,
"eval_samples_per_second": 1.182,
"eval_steps_per_second": 0.591,
"step": 5150
},
{
"epoch": 50.00097087378641,
"grad_norm": 0.0008396881748922169,
"learning_rate": 2.772384034519957e-05,
"loss": 0.0001,
"step": 5160
},
{
"epoch": 50.001941747572815,
"grad_norm": 0.0008467771112918854,
"learning_rate": 2.766990291262136e-05,
"loss": 0.0002,
"step": 5170
},
{
"epoch": 50.00291262135922,
"grad_norm": 0.0013826977228745818,
"learning_rate": 2.761596548004315e-05,
"loss": 0.0001,
"step": 5180
},
{
"epoch": 50.00388349514563,
"grad_norm": 0.0011158462148159742,
"learning_rate": 2.7562028047464943e-05,
"loss": 0.2459,
"step": 5190
},
{
"epoch": 50.00485436893204,
"grad_norm": 0.0016094492748379707,
"learning_rate": 2.7508090614886734e-05,
"loss": 0.3482,
"step": 5200
},
{
"epoch": 50.005825242718444,
"grad_norm": 0.004732028115540743,
"learning_rate": 2.7454153182308524e-05,
"loss": 0.0006,
"step": 5210
},
{
"epoch": 50.00679611650485,
"grad_norm": 0.0011407621204853058,
"learning_rate": 2.7400215749730312e-05,
"loss": 0.0,
"step": 5220
},
{
"epoch": 50.00776699029126,
"grad_norm": 0.0013770820805802941,
"learning_rate": 2.7346278317152106e-05,
"loss": 0.2484,
"step": 5230
},
{
"epoch": 50.00873786407767,
"grad_norm": 0.005836030002683401,
"learning_rate": 2.7292340884573897e-05,
"loss": 0.2005,
"step": 5240
},
{
"epoch": 50.00970873786408,
"grad_norm": 0.0005388998542912304,
"learning_rate": 2.7238403451995688e-05,
"loss": 0.0592,
"step": 5250
},
{
"epoch": 50.01,
"eval_accuracy": 0.8928571428571429,
"eval_loss": 0.8054033517837524,
"eval_runtime": 23.5154,
"eval_samples_per_second": 1.191,
"eval_steps_per_second": 0.595,
"step": 5253
},
{
"epoch": 51.00067961165048,
"grad_norm": 0.0008743960061110556,
"learning_rate": 2.7184466019417475e-05,
"loss": 0.0328,
"step": 5260
},
{
"epoch": 51.00165048543689,
"grad_norm": 0.0006349069881252944,
"learning_rate": 2.7130528586839266e-05,
"loss": 0.0001,
"step": 5270
},
{
"epoch": 51.0026213592233,
"grad_norm": 0.03142313286662102,
"learning_rate": 2.707659115426106e-05,
"loss": 0.0002,
"step": 5280
},
{
"epoch": 51.00359223300971,
"grad_norm": 0.0012027677148580551,
"learning_rate": 2.702265372168285e-05,
"loss": 0.0001,
"step": 5290
},
{
"epoch": 51.00456310679612,
"grad_norm": 0.002473101019859314,
"learning_rate": 2.696871628910464e-05,
"loss": 0.2327,
"step": 5300
},
{
"epoch": 51.00553398058253,
"grad_norm": 0.001101154601201415,
"learning_rate": 2.691477885652643e-05,
"loss": 0.1636,
"step": 5310
},
{
"epoch": 51.006504854368934,
"grad_norm": 0.0010517933405935764,
"learning_rate": 2.6860841423948217e-05,
"loss": 0.0005,
"step": 5320
},
{
"epoch": 51.00747572815534,
"grad_norm": 0.013515341095626354,
"learning_rate": 2.6806903991370015e-05,
"loss": 0.0001,
"step": 5330
},
{
"epoch": 51.00844660194175,
"grad_norm": 0.0009451622026972473,
"learning_rate": 2.6752966558791803e-05,
"loss": 0.0017,
"step": 5340
},
{
"epoch": 51.009417475728156,
"grad_norm": 0.0007028866093605757,
"learning_rate": 2.6699029126213593e-05,
"loss": 0.0,
"step": 5350
},
{
"epoch": 51.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 0.8496811985969543,
"eval_runtime": 21.2389,
"eval_samples_per_second": 1.318,
"eval_steps_per_second": 0.659,
"step": 5356
},
{
"epoch": 52.000388349514566,
"grad_norm": 0.0008241264731623232,
"learning_rate": 2.664509169363538e-05,
"loss": 0.5666,
"step": 5360
},
{
"epoch": 52.00135922330097,
"grad_norm": 0.44574812054634094,
"learning_rate": 2.659115426105718e-05,
"loss": 0.0052,
"step": 5370
},
{
"epoch": 52.00233009708738,
"grad_norm": 0.0008719266625121236,
"learning_rate": 2.6537216828478966e-05,
"loss": 0.0001,
"step": 5380
},
{
"epoch": 52.00330097087379,
"grad_norm": 0.01195058785378933,
"learning_rate": 2.6483279395900757e-05,
"loss": 0.0001,
"step": 5390
},
{
"epoch": 52.004271844660195,
"grad_norm": 0.001950486097484827,
"learning_rate": 2.6429341963322544e-05,
"loss": 0.0109,
"step": 5400
},
{
"epoch": 52.0052427184466,
"grad_norm": 0.0007938763592392206,
"learning_rate": 2.6375404530744335e-05,
"loss": 0.0001,
"step": 5410
},
{
"epoch": 52.00621359223301,
"grad_norm": 0.0011625858023762703,
"learning_rate": 2.632146709816613e-05,
"loss": 0.0001,
"step": 5420
},
{
"epoch": 52.00718446601942,
"grad_norm": 0.0020412274170666933,
"learning_rate": 2.626752966558792e-05,
"loss": 0.0001,
"step": 5430
},
{
"epoch": 52.008155339805825,
"grad_norm": 1.1743907928466797,
"learning_rate": 2.6213592233009708e-05,
"loss": 0.0008,
"step": 5440
},
{
"epoch": 52.00912621359223,
"grad_norm": 0.0020884834229946136,
"learning_rate": 2.61596548004315e-05,
"loss": 0.0054,
"step": 5450
},
{
"epoch": 52.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.3964213132858276,
"eval_runtime": 20.8396,
"eval_samples_per_second": 1.344,
"eval_steps_per_second": 0.672,
"step": 5459
},
{
"epoch": 53.00009708737864,
"grad_norm": 0.0005270927213132381,
"learning_rate": 2.6105717367853293e-05,
"loss": 0.0001,
"step": 5460
},
{
"epoch": 53.00106796116505,
"grad_norm": 0.005888211075216532,
"learning_rate": 2.6051779935275084e-05,
"loss": 0.0115,
"step": 5470
},
{
"epoch": 53.002038834951456,
"grad_norm": 0.0006581686902791262,
"learning_rate": 2.599784250269687e-05,
"loss": 0.0001,
"step": 5480
},
{
"epoch": 53.00300970873786,
"grad_norm": 0.000697305251378566,
"learning_rate": 2.5943905070118662e-05,
"loss": 0.0002,
"step": 5490
},
{
"epoch": 53.00398058252427,
"grad_norm": 0.09318854659795761,
"learning_rate": 2.5889967637540453e-05,
"loss": 0.0013,
"step": 5500
},
{
"epoch": 53.00495145631068,
"grad_norm": 0.0016455305740237236,
"learning_rate": 2.5836030204962248e-05,
"loss": 0.5625,
"step": 5510
},
{
"epoch": 53.005922330097086,
"grad_norm": 0.0006702914251945913,
"learning_rate": 2.5782092772384035e-05,
"loss": 0.0013,
"step": 5520
},
{
"epoch": 53.00689320388349,
"grad_norm": 0.0012202069628983736,
"learning_rate": 2.5728155339805826e-05,
"loss": 0.0001,
"step": 5530
},
{
"epoch": 53.0078640776699,
"grad_norm": 0.0017498712986707687,
"learning_rate": 2.5674217907227617e-05,
"loss": 0.0645,
"step": 5540
},
{
"epoch": 53.00883495145631,
"grad_norm": 0.0019502121722325683,
"learning_rate": 2.5620280474649404e-05,
"loss": 0.0002,
"step": 5550
},
{
"epoch": 53.009805825242715,
"grad_norm": 0.0021820820402354,
"learning_rate": 2.55663430420712e-05,
"loss": 0.0003,
"step": 5560
},
{
"epoch": 53.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.0987943410873413,
"eval_runtime": 22.2614,
"eval_samples_per_second": 1.258,
"eval_steps_per_second": 0.629,
"step": 5562
},
{
"epoch": 54.000776699029124,
"grad_norm": 0.713193953037262,
"learning_rate": 2.551240560949299e-05,
"loss": 0.0164,
"step": 5570
},
{
"epoch": 54.00174757281553,
"grad_norm": 0.015315674245357513,
"learning_rate": 2.545846817691478e-05,
"loss": 0.0001,
"step": 5580
},
{
"epoch": 54.00271844660194,
"grad_norm": 17.17043113708496,
"learning_rate": 2.5404530744336568e-05,
"loss": 0.3176,
"step": 5590
},
{
"epoch": 54.00368932038835,
"grad_norm": 0.009084532968699932,
"learning_rate": 2.5350593311758362e-05,
"loss": 0.5636,
"step": 5600
},
{
"epoch": 54.004660194174754,
"grad_norm": 0.0010076395701617002,
"learning_rate": 2.5296655879180153e-05,
"loss": 0.1239,
"step": 5610
},
{
"epoch": 54.00563106796117,
"grad_norm": 0.022011147812008858,
"learning_rate": 2.5242718446601944e-05,
"loss": 0.0003,
"step": 5620
},
{
"epoch": 54.006601941747576,
"grad_norm": 0.005740031599998474,
"learning_rate": 2.518878101402373e-05,
"loss": 0.4295,
"step": 5630
},
{
"epoch": 54.00757281553398,
"grad_norm": 0.002488040132448077,
"learning_rate": 2.5134843581445522e-05,
"loss": 0.0545,
"step": 5640
},
{
"epoch": 54.00854368932039,
"grad_norm": 0.1785544604063034,
"learning_rate": 2.5080906148867317e-05,
"loss": 0.0978,
"step": 5650
},
{
"epoch": 54.0095145631068,
"grad_norm": 0.0005429410957731307,
"learning_rate": 2.5026968716289107e-05,
"loss": 0.0011,
"step": 5660
},
{
"epoch": 54.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.0403578281402588,
"eval_runtime": 22.9391,
"eval_samples_per_second": 1.221,
"eval_steps_per_second": 0.61,
"step": 5665
},
{
"epoch": 55.00048543689321,
"grad_norm": 19.081138610839844,
"learning_rate": 2.4973031283710895e-05,
"loss": 0.0768,
"step": 5670
},
{
"epoch": 55.001456310679615,
"grad_norm": 0.001313048996962607,
"learning_rate": 2.491909385113269e-05,
"loss": 0.0001,
"step": 5680
},
{
"epoch": 55.00242718446602,
"grad_norm": 0.0006067248759791255,
"learning_rate": 2.4865156418554477e-05,
"loss": 0.0002,
"step": 5690
},
{
"epoch": 55.00339805825243,
"grad_norm": 3.0214898586273193,
"learning_rate": 2.481121898597627e-05,
"loss": 0.0041,
"step": 5700
},
{
"epoch": 55.00436893203884,
"grad_norm": 0.0009692611638456583,
"learning_rate": 2.475728155339806e-05,
"loss": 0.0003,
"step": 5710
},
{
"epoch": 55.005339805825244,
"grad_norm": 0.000569820636883378,
"learning_rate": 2.470334412081985e-05,
"loss": 0.0,
"step": 5720
},
{
"epoch": 55.00631067961165,
"grad_norm": 0.00099732365924865,
"learning_rate": 2.464940668824164e-05,
"loss": 0.0004,
"step": 5730
},
{
"epoch": 55.00728155339806,
"grad_norm": 0.0022911953274160624,
"learning_rate": 2.459546925566343e-05,
"loss": 0.0023,
"step": 5740
},
{
"epoch": 55.008252427184466,
"grad_norm": 0.0014538817340508103,
"learning_rate": 2.4541531823085222e-05,
"loss": 0.0001,
"step": 5750
},
{
"epoch": 55.00922330097087,
"grad_norm": 0.27011483907699585,
"learning_rate": 2.4487594390507013e-05,
"loss": 0.0002,
"step": 5760
},
{
"epoch": 55.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.2517061233520508,
"eval_runtime": 25.0842,
"eval_samples_per_second": 1.116,
"eval_steps_per_second": 0.558,
"step": 5768
},
{
"epoch": 56.00019417475728,
"grad_norm": 0.0008854252519086003,
"learning_rate": 2.4433656957928804e-05,
"loss": 0.0,
"step": 5770
},
{
"epoch": 56.00116504854369,
"grad_norm": 0.0011844671098515391,
"learning_rate": 2.4379719525350595e-05,
"loss": 0.0002,
"step": 5780
},
{
"epoch": 56.0021359223301,
"grad_norm": 0.0008450274472124875,
"learning_rate": 2.4325782092772386e-05,
"loss": 0.0001,
"step": 5790
},
{
"epoch": 56.003106796116505,
"grad_norm": 0.0004037294420413673,
"learning_rate": 2.4271844660194176e-05,
"loss": 0.2135,
"step": 5800
},
{
"epoch": 56.00407766990291,
"grad_norm": 0.0006636450416408479,
"learning_rate": 2.4217907227615967e-05,
"loss": 0.2917,
"step": 5810
},
{
"epoch": 56.00504854368932,
"grad_norm": 0.18323852121829987,
"learning_rate": 2.4163969795037758e-05,
"loss": 0.0004,
"step": 5820
},
{
"epoch": 56.00601941747573,
"grad_norm": 0.03130364790558815,
"learning_rate": 2.411003236245955e-05,
"loss": 0.0583,
"step": 5830
},
{
"epoch": 56.006990291262134,
"grad_norm": 0.0004375301650725305,
"learning_rate": 2.405609492988134e-05,
"loss": 0.0002,
"step": 5840
},
{
"epoch": 56.00796116504854,
"grad_norm": 0.003825717605650425,
"learning_rate": 2.400215749730313e-05,
"loss": 0.0036,
"step": 5850
},
{
"epoch": 56.00893203883495,
"grad_norm": 0.001984888222068548,
"learning_rate": 2.394822006472492e-05,
"loss": 0.0,
"step": 5860
},
{
"epoch": 56.00990291262136,
"grad_norm": 0.001028253580443561,
"learning_rate": 2.3894282632146713e-05,
"loss": 0.0003,
"step": 5870
},
{
"epoch": 56.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.4791873693466187,
"eval_runtime": 20.5712,
"eval_samples_per_second": 1.361,
"eval_steps_per_second": 0.681,
"step": 5871
},
{
"epoch": 57.000873786407766,
"grad_norm": 0.001414372236467898,
"learning_rate": 2.38403451995685e-05,
"loss": 0.0001,
"step": 5880
},
{
"epoch": 57.00184466019417,
"grad_norm": 0.0013856083387508988,
"learning_rate": 2.3786407766990294e-05,
"loss": 0.0001,
"step": 5890
},
{
"epoch": 57.00281553398058,
"grad_norm": 0.0007620238466188312,
"learning_rate": 2.3732470334412082e-05,
"loss": 0.02,
"step": 5900
},
{
"epoch": 57.00378640776699,
"grad_norm": 0.0008736704476177692,
"learning_rate": 2.3678532901833876e-05,
"loss": 0.4739,
"step": 5910
},
{
"epoch": 57.004757281553395,
"grad_norm": 0.00041962246177718043,
"learning_rate": 2.3624595469255664e-05,
"loss": 0.0001,
"step": 5920
},
{
"epoch": 57.0057281553398,
"grad_norm": 0.0010289285564795136,
"learning_rate": 2.3570658036677458e-05,
"loss": 0.0036,
"step": 5930
},
{
"epoch": 57.00669902912621,
"grad_norm": 0.0009487426141276956,
"learning_rate": 2.3516720604099245e-05,
"loss": 0.0101,
"step": 5940
},
{
"epoch": 57.007669902912625,
"grad_norm": 0.004343108739703894,
"learning_rate": 2.3462783171521036e-05,
"loss": 0.0001,
"step": 5950
},
{
"epoch": 57.00864077669903,
"grad_norm": 0.0014376341132447124,
"learning_rate": 2.3408845738942827e-05,
"loss": 0.3708,
"step": 5960
},
{
"epoch": 57.00961165048544,
"grad_norm": 0.0006963663618080318,
"learning_rate": 2.3354908306364618e-05,
"loss": 0.0001,
"step": 5970
},
{
"epoch": 57.01,
"eval_accuracy": 0.75,
"eval_loss": 1.5388238430023193,
"eval_runtime": 24.6506,
"eval_samples_per_second": 1.136,
"eval_steps_per_second": 0.568,
"step": 5974
},
{
"epoch": 58.00058252427184,
"grad_norm": 0.001456027734093368,
"learning_rate": 2.330097087378641e-05,
"loss": 0.0005,
"step": 5980
},
{
"epoch": 58.00155339805825,
"grad_norm": 0.0018259447533637285,
"learning_rate": 2.32470334412082e-05,
"loss": 0.003,
"step": 5990
},
{
"epoch": 58.00252427184466,
"grad_norm": 0.00042366181151010096,
"learning_rate": 2.319309600862999e-05,
"loss": 0.0017,
"step": 6000
},
{
"epoch": 58.00349514563107,
"grad_norm": 0.0008776785107329488,
"learning_rate": 2.313915857605178e-05,
"loss": 0.0023,
"step": 6010
},
{
"epoch": 58.00446601941748,
"grad_norm": 127.0488510131836,
"learning_rate": 2.308522114347357e-05,
"loss": 0.1028,
"step": 6020
},
{
"epoch": 58.005436893203886,
"grad_norm": 0.0003990971017628908,
"learning_rate": 2.3031283710895363e-05,
"loss": 0.0049,
"step": 6030
},
{
"epoch": 58.00640776699029,
"grad_norm": 0.0013204180868342519,
"learning_rate": 2.297734627831715e-05,
"loss": 0.0001,
"step": 6040
},
{
"epoch": 58.0073786407767,
"grad_norm": 0.014434372074902058,
"learning_rate": 2.2923408845738945e-05,
"loss": 0.0001,
"step": 6050
},
{
"epoch": 58.00834951456311,
"grad_norm": 0.0017761661438271403,
"learning_rate": 2.2869471413160733e-05,
"loss": 0.3617,
"step": 6060
},
{
"epoch": 58.009320388349515,
"grad_norm": 0.0012962479377165437,
"learning_rate": 2.2815533980582527e-05,
"loss": 0.4154,
"step": 6070
},
{
"epoch": 58.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.1159684658050537,
"eval_runtime": 23.5255,
"eval_samples_per_second": 1.19,
"eval_steps_per_second": 0.595,
"step": 6077
},
{
"epoch": 59.000291262135924,
"grad_norm": 0.3833358585834503,
"learning_rate": 2.2761596548004314e-05,
"loss": 0.337,
"step": 6080
},
{
"epoch": 59.00126213592233,
"grad_norm": 0.0013803878100588918,
"learning_rate": 2.270765911542611e-05,
"loss": 0.0065,
"step": 6090
},
{
"epoch": 59.00223300970874,
"grad_norm": 0.0033133910037577152,
"learning_rate": 2.2653721682847896e-05,
"loss": 0.0001,
"step": 6100
},
{
"epoch": 59.003203883495146,
"grad_norm": 1.19782292842865,
"learning_rate": 2.2599784250269687e-05,
"loss": 0.0014,
"step": 6110
},
{
"epoch": 59.004174757281554,
"grad_norm": 0.010464402846992016,
"learning_rate": 2.2545846817691478e-05,
"loss": 0.0001,
"step": 6120
},
{
"epoch": 59.00514563106796,
"grad_norm": 0.0004663231084123254,
"learning_rate": 2.249190938511327e-05,
"loss": 0.3931,
"step": 6130
},
{
"epoch": 59.00611650485437,
"grad_norm": 5.216128826141357,
"learning_rate": 2.243797195253506e-05,
"loss": 0.0034,
"step": 6140
},
{
"epoch": 59.007087378640776,
"grad_norm": 0.0006288540898822248,
"learning_rate": 2.238403451995685e-05,
"loss": 0.0003,
"step": 6150
},
{
"epoch": 59.00805825242718,
"grad_norm": 0.0010511939181014895,
"learning_rate": 2.233009708737864e-05,
"loss": 0.0001,
"step": 6160
},
{
"epoch": 59.00902912621359,
"grad_norm": 0.0020628427155315876,
"learning_rate": 2.2276159654800432e-05,
"loss": 0.0002,
"step": 6170
},
{
"epoch": 59.01,
"grad_norm": 0.006775557994842529,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.0001,
"step": 6180
},
{
"epoch": 59.01,
"eval_accuracy": 0.75,
"eval_loss": 1.3917344808578491,
"eval_runtime": 25.1664,
"eval_samples_per_second": 1.113,
"eval_steps_per_second": 0.556,
"step": 6180
},
{
"epoch": 60.00097087378641,
"grad_norm": 0.0010625936556607485,
"learning_rate": 2.2168284789644014e-05,
"loss": 0.296,
"step": 6190
},
{
"epoch": 60.001941747572815,
"grad_norm": 0.000770657614339143,
"learning_rate": 2.2114347357065805e-05,
"loss": 0.0043,
"step": 6200
},
{
"epoch": 60.00291262135922,
"grad_norm": 0.0016896168235689402,
"learning_rate": 2.2060409924487596e-05,
"loss": 0.3507,
"step": 6210
},
{
"epoch": 60.00388349514563,
"grad_norm": 0.0007442439091391861,
"learning_rate": 2.2006472491909387e-05,
"loss": 0.0014,
"step": 6220
},
{
"epoch": 60.00485436893204,
"grad_norm": 0.0007845446816645563,
"learning_rate": 2.1952535059331178e-05,
"loss": 0.0001,
"step": 6230
},
{
"epoch": 60.005825242718444,
"grad_norm": 0.0009400100097991526,
"learning_rate": 2.189859762675297e-05,
"loss": 0.0,
"step": 6240
},
{
"epoch": 60.00679611650485,
"grad_norm": 0.0017640365986153483,
"learning_rate": 2.1844660194174756e-05,
"loss": 0.0037,
"step": 6250
},
{
"epoch": 60.00776699029126,
"grad_norm": 10.778125762939453,
"learning_rate": 2.179072276159655e-05,
"loss": 0.0086,
"step": 6260
},
{
"epoch": 60.00873786407767,
"grad_norm": 24.49880027770996,
"learning_rate": 2.1736785329018338e-05,
"loss": 0.1088,
"step": 6270
},
{
"epoch": 60.00970873786408,
"grad_norm": 0.0011425875127315521,
"learning_rate": 2.1682847896440132e-05,
"loss": 0.0001,
"step": 6280
},
{
"epoch": 60.01,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 1.5009710788726807,
"eval_runtime": 24.5264,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 0.571,
"step": 6283
},
{
"epoch": 61.00067961165048,
"grad_norm": 0.000888823124114424,
"learning_rate": 2.162891046386192e-05,
"loss": 0.0,
"step": 6290
},
{
"epoch": 61.00165048543689,
"grad_norm": 0.0028330725617706776,
"learning_rate": 2.1574973031283714e-05,
"loss": 0.0006,
"step": 6300
},
{
"epoch": 61.0026213592233,
"grad_norm": 0.00043673234176822007,
"learning_rate": 2.15210355987055e-05,
"loss": 0.033,
"step": 6310
},
{
"epoch": 61.00359223300971,
"grad_norm": 0.001583361066877842,
"learning_rate": 2.1467098166127296e-05,
"loss": 0.0884,
"step": 6320
},
{
"epoch": 61.00456310679612,
"grad_norm": 0.00048320548376068473,
"learning_rate": 2.1413160733549083e-05,
"loss": 0.0028,
"step": 6330
},
{
"epoch": 61.00553398058253,
"grad_norm": 0.0021810547914355993,
"learning_rate": 2.1359223300970874e-05,
"loss": 0.0006,
"step": 6340
},
{
"epoch": 61.006504854368934,
"grad_norm": 0.0011734174331650138,
"learning_rate": 2.1305285868392665e-05,
"loss": 0.4979,
"step": 6350
},
{
"epoch": 61.00747572815534,
"grad_norm": 81.83573913574219,
"learning_rate": 2.1251348435814456e-05,
"loss": 0.1094,
"step": 6360
},
{
"epoch": 61.00844660194175,
"grad_norm": 0.007653074339032173,
"learning_rate": 2.1197411003236247e-05,
"loss": 0.0001,
"step": 6370
},
{
"epoch": 61.009417475728156,
"grad_norm": 0.013526824302971363,
"learning_rate": 2.1143473570658038e-05,
"loss": 0.1886,
"step": 6380
},
{
"epoch": 61.01,
"eval_accuracy": 0.6785714285714286,
"eval_loss": 1.6617997884750366,
"eval_runtime": 22.6299,
"eval_samples_per_second": 1.237,
"eval_steps_per_second": 0.619,
"step": 6386
},
{
"epoch": 62.000388349514566,
"grad_norm": 0.0008851753082126379,
"learning_rate": 2.108953613807983e-05,
"loss": 0.0004,
"step": 6390
},
{
"epoch": 62.00135922330097,
"grad_norm": 0.00079250120325014,
"learning_rate": 2.103559870550162e-05,
"loss": 0.0,
"step": 6400
},
{
"epoch": 62.00233009708738,
"grad_norm": 0.000890954106580466,
"learning_rate": 2.0981661272923407e-05,
"loss": 0.0679,
"step": 6410
},
{
"epoch": 62.00330097087379,
"grad_norm": 0.006305835209786892,
"learning_rate": 2.09277238403452e-05,
"loss": 0.0001,
"step": 6420
},
{
"epoch": 62.004271844660195,
"grad_norm": 0.0009647891274653375,
"learning_rate": 2.0873786407766992e-05,
"loss": 0.0001,
"step": 6430
},
{
"epoch": 62.0052427184466,
"grad_norm": 0.0005119222914800048,
"learning_rate": 2.0819848975188783e-05,
"loss": 0.1818,
"step": 6440
},
{
"epoch": 62.00621359223301,
"grad_norm": 0.0012782260309904814,
"learning_rate": 2.0765911542610574e-05,
"loss": 0.0001,
"step": 6450
},
{
"epoch": 62.00718446601942,
"grad_norm": 0.23857539892196655,
"learning_rate": 2.0711974110032365e-05,
"loss": 0.0003,
"step": 6460
},
{
"epoch": 62.008155339805825,
"grad_norm": 0.0014477778458967805,
"learning_rate": 2.0658036677454156e-05,
"loss": 0.3823,
"step": 6470
},
{
"epoch": 62.00912621359223,
"grad_norm": 0.00042155140545219183,
"learning_rate": 2.0604099244875946e-05,
"loss": 0.0005,
"step": 6480
},
{
"epoch": 62.01,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 1.3342440128326416,
"eval_runtime": 23.5059,
"eval_samples_per_second": 1.191,
"eval_steps_per_second": 0.596,
"step": 6489
},
{
"epoch": 63.00009708737864,
"grad_norm": 0.00042777747148647904,
"learning_rate": 2.0550161812297737e-05,
"loss": 0.0,
"step": 6490
},
{
"epoch": 63.00106796116505,
"grad_norm": 0.0010107433190569282,
"learning_rate": 2.0496224379719525e-05,
"loss": 0.1601,
"step": 6500
},
{
"epoch": 63.002038834951456,
"grad_norm": 0.0010543958051130176,
"learning_rate": 2.044228694714132e-05,
"loss": 0.0001,
"step": 6510
},
{
"epoch": 63.00300970873786,
"grad_norm": 0.0012591666309162974,
"learning_rate": 2.0388349514563107e-05,
"loss": 0.0001,
"step": 6520
},
{
"epoch": 63.00398058252427,
"grad_norm": 0.0005232561961747706,
"learning_rate": 2.03344120819849e-05,
"loss": 0.018,
"step": 6530
},
{
"epoch": 63.00495145631068,
"grad_norm": 0.00123388331849128,
"learning_rate": 2.028047464940669e-05,
"loss": 0.0003,
"step": 6540
},
{
"epoch": 63.005922330097086,
"grad_norm": 0.0004934226744808257,
"learning_rate": 2.0226537216828483e-05,
"loss": 0.0001,
"step": 6550
},
{
"epoch": 63.00689320388349,
"grad_norm": 15.355786323547363,
"learning_rate": 2.017259978425027e-05,
"loss": 0.428,
"step": 6560
},
{
"epoch": 63.0078640776699,
"grad_norm": 0.0007367224316112697,
"learning_rate": 2.011866235167206e-05,
"loss": 0.001,
"step": 6570
},
{
"epoch": 63.00883495145631,
"grad_norm": 0.0009193682344630361,
"learning_rate": 2.0064724919093852e-05,
"loss": 0.0014,
"step": 6580
},
{
"epoch": 63.009805825242715,
"grad_norm": 0.0031028043013066053,
"learning_rate": 2.0010787486515643e-05,
"loss": 0.005,
"step": 6590
},
{
"epoch": 63.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.0010968446731567,
"eval_runtime": 23.7202,
"eval_samples_per_second": 1.18,
"eval_steps_per_second": 0.59,
"step": 6592
},
{
"epoch": 64.00077669902913,
"grad_norm": 0.0005543153383769095,
"learning_rate": 1.9956850053937434e-05,
"loss": 0.0094,
"step": 6600
},
{
"epoch": 64.00174757281553,
"grad_norm": 0.0005683793569914997,
"learning_rate": 1.9902912621359225e-05,
"loss": 0.0,
"step": 6610
},
{
"epoch": 64.00271844660195,
"grad_norm": 0.011601871810853481,
"learning_rate": 1.9848975188781015e-05,
"loss": 0.1088,
"step": 6620
},
{
"epoch": 64.00368932038835,
"grad_norm": 0.0008393569150939584,
"learning_rate": 1.9795037756202806e-05,
"loss": 0.0005,
"step": 6630
},
{
"epoch": 64.00466019417476,
"grad_norm": 0.029725907370448112,
"learning_rate": 1.9741100323624594e-05,
"loss": 0.0001,
"step": 6640
},
{
"epoch": 64.00563106796116,
"grad_norm": 0.0007479650084860623,
"learning_rate": 1.9687162891046388e-05,
"loss": 0.0001,
"step": 6650
},
{
"epoch": 64.00660194174758,
"grad_norm": 0.0008673795964568853,
"learning_rate": 1.9633225458468176e-05,
"loss": 0.0009,
"step": 6660
},
{
"epoch": 64.00757281553398,
"grad_norm": 0.0013320200378075242,
"learning_rate": 1.957928802588997e-05,
"loss": 0.0002,
"step": 6670
},
{
"epoch": 64.00854368932039,
"grad_norm": 0.0005077565438114107,
"learning_rate": 1.9525350593311757e-05,
"loss": 0.0001,
"step": 6680
},
{
"epoch": 64.00951456310679,
"grad_norm": 0.0005587028572335839,
"learning_rate": 1.947141316073355e-05,
"loss": 0.0,
"step": 6690
},
{
"epoch": 64.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 0.718567967414856,
"eval_runtime": 23.6794,
"eval_samples_per_second": 1.182,
"eval_steps_per_second": 0.591,
"step": 6695
},
{
"epoch": 65.0004854368932,
"grad_norm": 0.0006523833726532757,
"learning_rate": 1.941747572815534e-05,
"loss": 0.0,
"step": 6700
},
{
"epoch": 65.00145631067961,
"grad_norm": 0.009724684990942478,
"learning_rate": 1.9363538295577133e-05,
"loss": 0.3196,
"step": 6710
},
{
"epoch": 65.00242718446601,
"grad_norm": 0.0006275117048062384,
"learning_rate": 1.930960086299892e-05,
"loss": 0.0003,
"step": 6720
},
{
"epoch": 65.00339805825243,
"grad_norm": 0.0027268766425549984,
"learning_rate": 1.9255663430420712e-05,
"loss": 0.0001,
"step": 6730
},
{
"epoch": 65.00436893203883,
"grad_norm": 0.0004702035803347826,
"learning_rate": 1.9201725997842503e-05,
"loss": 0.3205,
"step": 6740
},
{
"epoch": 65.00533980582524,
"grad_norm": 0.0012323668925091624,
"learning_rate": 1.9147788565264294e-05,
"loss": 0.0003,
"step": 6750
},
{
"epoch": 65.00631067961164,
"grad_norm": 0.0007599996170029044,
"learning_rate": 1.9093851132686084e-05,
"loss": 0.7153,
"step": 6760
},
{
"epoch": 65.00728155339806,
"grad_norm": 0.0010344136971980333,
"learning_rate": 1.9039913700107875e-05,
"loss": 0.0001,
"step": 6770
},
{
"epoch": 65.00825242718446,
"grad_norm": 0.000558846746571362,
"learning_rate": 1.8985976267529666e-05,
"loss": 0.0001,
"step": 6780
},
{
"epoch": 65.00922330097087,
"grad_norm": 0.0008384157554246485,
"learning_rate": 1.8932038834951457e-05,
"loss": 0.1102,
"step": 6790
},
{
"epoch": 65.01,
"eval_accuracy": 0.75,
"eval_loss": 0.9631302952766418,
"eval_runtime": 23.2954,
"eval_samples_per_second": 1.202,
"eval_steps_per_second": 0.601,
"step": 6798
},
{
"epoch": 66.00019417475728,
"grad_norm": 0.0006994837895035744,
"learning_rate": 1.8878101402373248e-05,
"loss": 0.0573,
"step": 6800
},
{
"epoch": 66.00116504854368,
"grad_norm": 0.0016522917430847883,
"learning_rate": 1.882416396979504e-05,
"loss": 0.0029,
"step": 6810
},
{
"epoch": 66.0021359223301,
"grad_norm": 0.0006155685405246913,
"learning_rate": 1.877022653721683e-05,
"loss": 0.0044,
"step": 6820
},
{
"epoch": 66.0031067961165,
"grad_norm": 0.0016970273572951555,
"learning_rate": 1.871628910463862e-05,
"loss": 0.0001,
"step": 6830
},
{
"epoch": 66.00407766990291,
"grad_norm": 9.94116497039795,
"learning_rate": 1.866235167206041e-05,
"loss": 0.0104,
"step": 6840
},
{
"epoch": 66.00504854368933,
"grad_norm": 0.020060516893863678,
"learning_rate": 1.8608414239482202e-05,
"loss": 0.2777,
"step": 6850
},
{
"epoch": 66.00601941747573,
"grad_norm": 0.001221095328219235,
"learning_rate": 1.8554476806903993e-05,
"loss": 0.0818,
"step": 6860
},
{
"epoch": 66.00699029126214,
"grad_norm": 0.001694864360615611,
"learning_rate": 1.8500539374325784e-05,
"loss": 0.0002,
"step": 6870
},
{
"epoch": 66.00796116504854,
"grad_norm": 0.059923529624938965,
"learning_rate": 1.8446601941747575e-05,
"loss": 0.255,
"step": 6880
},
{
"epoch": 66.00893203883496,
"grad_norm": 0.0005680607282556593,
"learning_rate": 1.8392664509169363e-05,
"loss": 0.0,
"step": 6890
},
{
"epoch": 66.00990291262136,
"grad_norm": 0.06053140386939049,
"learning_rate": 1.8338727076591157e-05,
"loss": 0.0153,
"step": 6900
},
{
"epoch": 66.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.0193309783935547,
"eval_runtime": 24.2605,
"eval_samples_per_second": 1.154,
"eval_steps_per_second": 0.577,
"step": 6901
},
{
"epoch": 67.00087378640777,
"grad_norm": 0.002915704855695367,
"learning_rate": 1.8284789644012944e-05,
"loss": 0.0072,
"step": 6910
},
{
"epoch": 67.00184466019418,
"grad_norm": 1.9173521995544434,
"learning_rate": 1.823085221143474e-05,
"loss": 0.0008,
"step": 6920
},
{
"epoch": 67.00281553398058,
"grad_norm": 0.0010551897576078773,
"learning_rate": 1.8176914778856526e-05,
"loss": 0.0001,
"step": 6930
},
{
"epoch": 67.003786407767,
"grad_norm": 0.00146699626930058,
"learning_rate": 1.812297734627832e-05,
"loss": 0.2286,
"step": 6940
},
{
"epoch": 67.0047572815534,
"grad_norm": 0.0019446623045951128,
"learning_rate": 1.8069039913700108e-05,
"loss": 0.0001,
"step": 6950
},
{
"epoch": 67.00572815533981,
"grad_norm": 0.0017978387186303735,
"learning_rate": 1.80151024811219e-05,
"loss": 0.1882,
"step": 6960
},
{
"epoch": 67.00669902912621,
"grad_norm": 0.0010246881283819675,
"learning_rate": 1.796116504854369e-05,
"loss": 0.0261,
"step": 6970
},
{
"epoch": 67.00766990291262,
"grad_norm": 0.0016106581315398216,
"learning_rate": 1.790722761596548e-05,
"loss": 0.2689,
"step": 6980
},
{
"epoch": 67.00864077669902,
"grad_norm": 0.0004424410581123084,
"learning_rate": 1.785329018338727e-05,
"loss": 0.0008,
"step": 6990
},
{
"epoch": 67.00961165048544,
"grad_norm": 0.07792424410581589,
"learning_rate": 1.7799352750809062e-05,
"loss": 0.0001,
"step": 7000
},
{
"epoch": 67.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.34980046749115,
"eval_runtime": 24.4852,
"eval_samples_per_second": 1.144,
"eval_steps_per_second": 0.572,
"step": 7004
},
{
"epoch": 68.00058252427185,
"grad_norm": 0.0004936918267048895,
"learning_rate": 1.7745415318230853e-05,
"loss": 0.0002,
"step": 7010
},
{
"epoch": 68.00155339805825,
"grad_norm": 0.001968265511095524,
"learning_rate": 1.7691477885652644e-05,
"loss": 0.0001,
"step": 7020
},
{
"epoch": 68.00252427184466,
"grad_norm": 0.000869004987180233,
"learning_rate": 1.763754045307443e-05,
"loss": 0.2356,
"step": 7030
},
{
"epoch": 68.00349514563106,
"grad_norm": 0.0005462713888846338,
"learning_rate": 1.7583603020496226e-05,
"loss": 0.2569,
"step": 7040
},
{
"epoch": 68.00446601941748,
"grad_norm": 0.015221036970615387,
"learning_rate": 1.7529665587918013e-05,
"loss": 0.0001,
"step": 7050
},
{
"epoch": 68.00543689320388,
"grad_norm": 0.0005463761044666171,
"learning_rate": 1.7475728155339808e-05,
"loss": 0.0,
"step": 7060
},
{
"epoch": 68.00640776699029,
"grad_norm": 19.067604064941406,
"learning_rate": 1.7421790722761595e-05,
"loss": 0.0637,
"step": 7070
},
{
"epoch": 68.0073786407767,
"grad_norm": 0.0007243465515784919,
"learning_rate": 1.736785329018339e-05,
"loss": 0.0,
"step": 7080
},
{
"epoch": 68.00834951456311,
"grad_norm": 0.0008660106104798615,
"learning_rate": 1.7313915857605177e-05,
"loss": 0.0005,
"step": 7090
},
{
"epoch": 68.00932038834951,
"grad_norm": 0.0007324232719838619,
"learning_rate": 1.725997842502697e-05,
"loss": 0.0001,
"step": 7100
},
{
"epoch": 68.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.0400390625,
"eval_runtime": 23.9679,
"eval_samples_per_second": 1.168,
"eval_steps_per_second": 0.584,
"step": 7107
},
{
"epoch": 69.00029126213592,
"grad_norm": 0.0004631567280739546,
"learning_rate": 1.7206040992448762e-05,
"loss": 0.0091,
"step": 7110
},
{
"epoch": 69.00126213592233,
"grad_norm": 0.0005055834772065282,
"learning_rate": 1.715210355987055e-05,
"loss": 0.0,
"step": 7120
},
{
"epoch": 69.00223300970873,
"grad_norm": 0.000822292291559279,
"learning_rate": 1.7098166127292344e-05,
"loss": 0.0076,
"step": 7130
},
{
"epoch": 69.00320388349515,
"grad_norm": 0.0009453039965592325,
"learning_rate": 1.704422869471413e-05,
"loss": 0.1884,
"step": 7140
},
{
"epoch": 69.00417475728155,
"grad_norm": 0.0009586084634065628,
"learning_rate": 1.6990291262135926e-05,
"loss": 0.0,
"step": 7150
},
{
"epoch": 69.00514563106796,
"grad_norm": 0.31685885787010193,
"learning_rate": 1.6936353829557713e-05,
"loss": 0.005,
"step": 7160
},
{
"epoch": 69.00611650485438,
"grad_norm": 0.009163271635770798,
"learning_rate": 1.6882416396979507e-05,
"loss": 0.0002,
"step": 7170
},
{
"epoch": 69.00708737864078,
"grad_norm": 0.0014338033506646752,
"learning_rate": 1.6828478964401295e-05,
"loss": 0.0036,
"step": 7180
},
{
"epoch": 69.00805825242719,
"grad_norm": 0.0006324952701106668,
"learning_rate": 1.6774541531823086e-05,
"loss": 0.2692,
"step": 7190
},
{
"epoch": 69.00902912621359,
"grad_norm": 0.000675214163493365,
"learning_rate": 1.6720604099244877e-05,
"loss": 0.1146,
"step": 7200
},
{
"epoch": 69.01,
"grad_norm": 0.0026698948349803686,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0842,
"step": 7210
},
{
"epoch": 69.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.003670573234558,
"eval_runtime": 24.2766,
"eval_samples_per_second": 1.153,
"eval_steps_per_second": 0.577,
"step": 7210
},
{
"epoch": 70.00097087378641,
"grad_norm": 0.0008359177154488862,
"learning_rate": 1.661272923408846e-05,
"loss": 0.0,
"step": 7220
},
{
"epoch": 70.00194174757281,
"grad_norm": 0.004826887510716915,
"learning_rate": 1.655879180151025e-05,
"loss": 0.0003,
"step": 7230
},
{
"epoch": 70.00291262135923,
"grad_norm": 0.0009308085427619517,
"learning_rate": 1.650485436893204e-05,
"loss": 0.0,
"step": 7240
},
{
"epoch": 70.00388349514563,
"grad_norm": 0.0003821222053375095,
"learning_rate": 1.645091693635383e-05,
"loss": 0.0004,
"step": 7250
},
{
"epoch": 70.00485436893204,
"grad_norm": 0.00036909981281496584,
"learning_rate": 1.6396979503775622e-05,
"loss": 0.0001,
"step": 7260
},
{
"epoch": 70.00582524271844,
"grad_norm": 0.0726325735449791,
"learning_rate": 1.6343042071197413e-05,
"loss": 0.0001,
"step": 7270
},
{
"epoch": 70.00679611650486,
"grad_norm": 0.017005061730742455,
"learning_rate": 1.62891046386192e-05,
"loss": 0.0952,
"step": 7280
},
{
"epoch": 70.00776699029126,
"grad_norm": 0.0014751965645700693,
"learning_rate": 1.6235167206040995e-05,
"loss": 0.2947,
"step": 7290
},
{
"epoch": 70.00873786407767,
"grad_norm": 0.0006246562115848064,
"learning_rate": 1.6181229773462782e-05,
"loss": 0.0091,
"step": 7300
},
{
"epoch": 70.00970873786407,
"grad_norm": 0.0006601705681532621,
"learning_rate": 1.6127292340884576e-05,
"loss": 0.0001,
"step": 7310
},
{
"epoch": 70.01,
"eval_accuracy": 0.75,
"eval_loss": 1.720339059829712,
"eval_runtime": 24.4441,
"eval_samples_per_second": 1.145,
"eval_steps_per_second": 0.573,
"step": 7313
},
{
"epoch": 71.00067961165048,
"grad_norm": 0.0016067043179646134,
"learning_rate": 1.6073354908306364e-05,
"loss": 0.0002,
"step": 7320
},
{
"epoch": 71.0016504854369,
"grad_norm": 0.000492661667522043,
"learning_rate": 1.6019417475728158e-05,
"loss": 0.0778,
"step": 7330
},
{
"epoch": 71.0026213592233,
"grad_norm": 0.0019496801542118192,
"learning_rate": 1.5965480043149946e-05,
"loss": 0.0122,
"step": 7340
},
{
"epoch": 71.00359223300971,
"grad_norm": 0.0010925508104264736,
"learning_rate": 1.5911542610571736e-05,
"loss": 0.0001,
"step": 7350
},
{
"epoch": 71.00456310679611,
"grad_norm": 0.0005245591746643186,
"learning_rate": 1.5857605177993527e-05,
"loss": 0.3084,
"step": 7360
},
{
"epoch": 71.00553398058253,
"grad_norm": 0.0008980587008409202,
"learning_rate": 1.5803667745415318e-05,
"loss": 0.0019,
"step": 7370
},
{
"epoch": 71.00650485436893,
"grad_norm": 0.0018496790435165167,
"learning_rate": 1.574973031283711e-05,
"loss": 0.0001,
"step": 7380
},
{
"epoch": 71.00747572815534,
"grad_norm": 0.05308768153190613,
"learning_rate": 1.56957928802589e-05,
"loss": 0.0001,
"step": 7390
},
{
"epoch": 71.00844660194174,
"grad_norm": 30.78109359741211,
"learning_rate": 1.564185544768069e-05,
"loss": 0.3489,
"step": 7400
},
{
"epoch": 71.00941747572816,
"grad_norm": 0.002415592549368739,
"learning_rate": 1.5587918015102482e-05,
"loss": 0.0001,
"step": 7410
},
{
"epoch": 71.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.1505813598632812,
"eval_runtime": 23.8273,
"eval_samples_per_second": 1.175,
"eval_steps_per_second": 0.588,
"step": 7416
},
{
"epoch": 72.00038834951457,
"grad_norm": 0.0009659325587563217,
"learning_rate": 1.5533980582524273e-05,
"loss": 0.2766,
"step": 7420
},
{
"epoch": 72.00135922330097,
"grad_norm": 0.0008527355967089534,
"learning_rate": 1.5480043149946064e-05,
"loss": 0.0,
"step": 7430
},
{
"epoch": 72.00233009708738,
"grad_norm": 0.0005238853627815843,
"learning_rate": 1.5426105717367854e-05,
"loss": 0.0002,
"step": 7440
},
{
"epoch": 72.00330097087378,
"grad_norm": 0.0005107687320560217,
"learning_rate": 1.5372168284789645e-05,
"loss": 0.0001,
"step": 7450
},
{
"epoch": 72.0042718446602,
"grad_norm": 0.00111380685120821,
"learning_rate": 1.5318230852211436e-05,
"loss": 0.0001,
"step": 7460
},
{
"epoch": 72.0052427184466,
"grad_norm": 0.008704396896064281,
"learning_rate": 1.5264293419633227e-05,
"loss": 0.0629,
"step": 7470
},
{
"epoch": 72.00621359223301,
"grad_norm": 0.001821440877392888,
"learning_rate": 1.5210355987055016e-05,
"loss": 0.0,
"step": 7480
},
{
"epoch": 72.00718446601942,
"grad_norm": 0.016031334176659584,
"learning_rate": 1.5156418554476809e-05,
"loss": 0.0001,
"step": 7490
},
{
"epoch": 72.00815533980582,
"grad_norm": 0.0005962299765087664,
"learning_rate": 1.5102481121898598e-05,
"loss": 0.2666,
"step": 7500
},
{
"epoch": 72.00912621359224,
"grad_norm": 0.0006452718516811728,
"learning_rate": 1.5048543689320387e-05,
"loss": 0.0001,
"step": 7510
},
{
"epoch": 72.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.3944032192230225,
"eval_runtime": 23.49,
"eval_samples_per_second": 1.192,
"eval_steps_per_second": 0.596,
"step": 7519
},
{
"epoch": 73.00009708737863,
"grad_norm": 0.00074690644396469,
"learning_rate": 1.499460625674218e-05,
"loss": 0.0001,
"step": 7520
},
{
"epoch": 73.00106796116505,
"grad_norm": 0.0009000824647955596,
"learning_rate": 1.4940668824163969e-05,
"loss": 0.0001,
"step": 7530
},
{
"epoch": 73.00203883495146,
"grad_norm": 0.0005898684030398726,
"learning_rate": 1.4886731391585762e-05,
"loss": 0.0002,
"step": 7540
},
{
"epoch": 73.00300970873786,
"grad_norm": 0.00045732298167422414,
"learning_rate": 1.483279395900755e-05,
"loss": 0.0007,
"step": 7550
},
{
"epoch": 73.00398058252428,
"grad_norm": 0.002637525787577033,
"learning_rate": 1.4778856526429343e-05,
"loss": 0.0001,
"step": 7560
},
{
"epoch": 73.00495145631068,
"grad_norm": 0.0015487922355532646,
"learning_rate": 1.4724919093851133e-05,
"loss": 0.0001,
"step": 7570
},
{
"epoch": 73.00592233009709,
"grad_norm": 0.0009302161633968353,
"learning_rate": 1.4670981661272923e-05,
"loss": 0.0039,
"step": 7580
},
{
"epoch": 73.0068932038835,
"grad_norm": 0.03521721437573433,
"learning_rate": 1.4617044228694714e-05,
"loss": 0.0001,
"step": 7590
},
{
"epoch": 73.00786407766991,
"grad_norm": 0.002057681092992425,
"learning_rate": 1.4563106796116505e-05,
"loss": 0.0001,
"step": 7600
},
{
"epoch": 73.00883495145631,
"grad_norm": 0.0013950722059234977,
"learning_rate": 1.4509169363538298e-05,
"loss": 0.0001,
"step": 7610
},
{
"epoch": 73.00980582524272,
"grad_norm": 0.0005244753556326032,
"learning_rate": 1.4455231930960087e-05,
"loss": 0.0632,
"step": 7620
},
{
"epoch": 73.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.3018865585327148,
"eval_runtime": 24.294,
"eval_samples_per_second": 1.153,
"eval_steps_per_second": 0.576,
"step": 7622
},
{
"epoch": 74.00077669902913,
"grad_norm": 0.5469286441802979,
"learning_rate": 1.440129449838188e-05,
"loss": 0.0893,
"step": 7630
},
{
"epoch": 74.00174757281553,
"grad_norm": 0.0006303411792032421,
"learning_rate": 1.4347357065803669e-05,
"loss": 0.0004,
"step": 7640
},
{
"epoch": 74.00271844660195,
"grad_norm": 0.0004991823807358742,
"learning_rate": 1.4293419633225461e-05,
"loss": 0.0,
"step": 7650
},
{
"epoch": 74.00368932038835,
"grad_norm": 0.0005889665917493403,
"learning_rate": 1.423948220064725e-05,
"loss": 0.0001,
"step": 7660
},
{
"epoch": 74.00466019417476,
"grad_norm": 0.0007777425344102085,
"learning_rate": 1.418554476806904e-05,
"loss": 0.0002,
"step": 7670
},
{
"epoch": 74.00563106796116,
"grad_norm": 0.001111809629946947,
"learning_rate": 1.4131607335490832e-05,
"loss": 0.1946,
"step": 7680
},
{
"epoch": 74.00660194174758,
"grad_norm": 0.0008204812766052783,
"learning_rate": 1.4077669902912621e-05,
"loss": 0.0674,
"step": 7690
},
{
"epoch": 74.00757281553398,
"grad_norm": 0.0011531045893207192,
"learning_rate": 1.4023732470334414e-05,
"loss": 0.0001,
"step": 7700
},
{
"epoch": 74.00854368932039,
"grad_norm": 0.0034655483905225992,
"learning_rate": 1.3969795037756203e-05,
"loss": 0.0001,
"step": 7710
},
{
"epoch": 74.00951456310679,
"grad_norm": 0.0005608770297840238,
"learning_rate": 1.3915857605177996e-05,
"loss": 0.0424,
"step": 7720
},
{
"epoch": 74.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.0466877222061157,
"eval_runtime": 24.4694,
"eval_samples_per_second": 1.144,
"eval_steps_per_second": 0.572,
"step": 7725
},
{
"epoch": 75.0004854368932,
"grad_norm": 0.0006410989444702864,
"learning_rate": 1.3861920172599785e-05,
"loss": 0.0001,
"step": 7730
},
{
"epoch": 75.00145631067961,
"grad_norm": 0.0010539211798459291,
"learning_rate": 1.3807982740021574e-05,
"loss": 0.2893,
"step": 7740
},
{
"epoch": 75.00242718446601,
"grad_norm": 0.000947575899772346,
"learning_rate": 1.3754045307443367e-05,
"loss": 0.0001,
"step": 7750
},
{
"epoch": 75.00339805825243,
"grad_norm": 0.0005764906527474523,
"learning_rate": 1.3700107874865156e-05,
"loss": 0.0,
"step": 7760
},
{
"epoch": 75.00436893203883,
"grad_norm": 0.000651567883323878,
"learning_rate": 1.3646170442286949e-05,
"loss": 0.0002,
"step": 7770
},
{
"epoch": 75.00533980582524,
"grad_norm": 90.93943786621094,
"learning_rate": 1.3592233009708738e-05,
"loss": 0.2704,
"step": 7780
},
{
"epoch": 75.00631067961164,
"grad_norm": 0.0006675939657725394,
"learning_rate": 1.353829557713053e-05,
"loss": 0.0,
"step": 7790
},
{
"epoch": 75.00728155339806,
"grad_norm": 0.0005555550451390445,
"learning_rate": 1.348435814455232e-05,
"loss": 0.0,
"step": 7800
},
{
"epoch": 75.00825242718446,
"grad_norm": 0.000521104724612087,
"learning_rate": 1.3430420711974109e-05,
"loss": 0.0002,
"step": 7810
},
{
"epoch": 75.00922330097087,
"grad_norm": 0.0005199057050049305,
"learning_rate": 1.3376483279395901e-05,
"loss": 0.0001,
"step": 7820
},
{
"epoch": 75.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.1821573972702026,
"eval_runtime": 25.5111,
"eval_samples_per_second": 1.098,
"eval_steps_per_second": 0.549,
"step": 7828
},
{
"epoch": 76.00019417475728,
"grad_norm": 27.20473289489746,
"learning_rate": 1.332254584681769e-05,
"loss": 0.1007,
"step": 7830
},
{
"epoch": 76.00116504854368,
"grad_norm": 0.00041796735604293644,
"learning_rate": 1.3268608414239483e-05,
"loss": 0.0016,
"step": 7840
},
{
"epoch": 76.0021359223301,
"grad_norm": 0.00063418282661587,
"learning_rate": 1.3214670981661272e-05,
"loss": 0.0003,
"step": 7850
},
{
"epoch": 76.0031067961165,
"grad_norm": 75.21340942382812,
"learning_rate": 1.3160733549083065e-05,
"loss": 0.1322,
"step": 7860
},
{
"epoch": 76.00407766990291,
"grad_norm": 0.0009397820685990155,
"learning_rate": 1.3106796116504854e-05,
"loss": 0.0001,
"step": 7870
},
{
"epoch": 76.00504854368933,
"grad_norm": 0.013500092551112175,
"learning_rate": 1.3052858683926647e-05,
"loss": 0.0,
"step": 7880
},
{
"epoch": 76.00601941747573,
"grad_norm": 0.0008127112523652613,
"learning_rate": 1.2998921251348436e-05,
"loss": 0.0,
"step": 7890
},
{
"epoch": 76.00699029126214,
"grad_norm": 0.0028982115909457207,
"learning_rate": 1.2944983818770227e-05,
"loss": 0.0074,
"step": 7900
},
{
"epoch": 76.00796116504854,
"grad_norm": 0.0021881507709622383,
"learning_rate": 1.2891046386192018e-05,
"loss": 0.4177,
"step": 7910
},
{
"epoch": 76.00893203883496,
"grad_norm": 0.00042111569200642407,
"learning_rate": 1.2837108953613808e-05,
"loss": 0.0001,
"step": 7920
},
{
"epoch": 76.00990291262136,
"grad_norm": 0.001411316217854619,
"learning_rate": 1.27831715210356e-05,
"loss": 0.1475,
"step": 7930
},
{
"epoch": 76.01,
"eval_accuracy": 0.75,
"eval_loss": 1.4204529523849487,
"eval_runtime": 23.5553,
"eval_samples_per_second": 1.189,
"eval_steps_per_second": 0.594,
"step": 7931
},
{
"epoch": 77.00087378640777,
"grad_norm": 0.0007739585707895458,
"learning_rate": 1.272923408845739e-05,
"loss": 0.0291,
"step": 7940
},
{
"epoch": 77.00184466019418,
"grad_norm": 0.0011331046698614955,
"learning_rate": 1.2675296655879181e-05,
"loss": 0.049,
"step": 7950
},
{
"epoch": 77.00281553398058,
"grad_norm": 0.0007495753234252334,
"learning_rate": 1.2621359223300972e-05,
"loss": 0.0483,
"step": 7960
},
{
"epoch": 77.003786407767,
"grad_norm": 0.0008724365616217256,
"learning_rate": 1.2567421790722761e-05,
"loss": 0.191,
"step": 7970
},
{
"epoch": 77.0047572815534,
"grad_norm": 0.001050332561135292,
"learning_rate": 1.2513484358144554e-05,
"loss": 0.0887,
"step": 7980
},
{
"epoch": 77.00572815533981,
"grad_norm": 0.0004873993748333305,
"learning_rate": 1.2459546925566345e-05,
"loss": 0.04,
"step": 7990
},
{
"epoch": 77.00669902912621,
"grad_norm": 0.0004962868988513947,
"learning_rate": 1.2405609492988135e-05,
"loss": 0.0686,
"step": 8000
},
{
"epoch": 77.00766990291262,
"grad_norm": 0.0015225057723000646,
"learning_rate": 1.2351672060409925e-05,
"loss": 0.0,
"step": 8010
},
{
"epoch": 77.00864077669902,
"grad_norm": 0.0019491496495902538,
"learning_rate": 1.2297734627831716e-05,
"loss": 0.0,
"step": 8020
},
{
"epoch": 77.00961165048544,
"grad_norm": 0.0008197619463317096,
"learning_rate": 1.2243797195253506e-05,
"loss": 0.0013,
"step": 8030
},
{
"epoch": 77.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.4495819807052612,
"eval_runtime": 23.581,
"eval_samples_per_second": 1.187,
"eval_steps_per_second": 0.594,
"step": 8034
},
{
"epoch": 78.00058252427185,
"grad_norm": 0.0007247371831908822,
"learning_rate": 1.2189859762675297e-05,
"loss": 0.0007,
"step": 8040
},
{
"epoch": 78.00155339805825,
"grad_norm": 0.0006012579542584717,
"learning_rate": 1.2135922330097088e-05,
"loss": 0.0024,
"step": 8050
},
{
"epoch": 78.00252427184466,
"grad_norm": 0.005412137135863304,
"learning_rate": 1.2081984897518879e-05,
"loss": 0.0,
"step": 8060
},
{
"epoch": 78.00349514563106,
"grad_norm": 0.0036323072854429483,
"learning_rate": 1.202804746494067e-05,
"loss": 0.0,
"step": 8070
},
{
"epoch": 78.00446601941748,
"grad_norm": 0.0008606135379523039,
"learning_rate": 1.197411003236246e-05,
"loss": 0.0,
"step": 8080
},
{
"epoch": 78.00543689320388,
"grad_norm": 0.0010280663846060634,
"learning_rate": 1.192017259978425e-05,
"loss": 0.1186,
"step": 8090
},
{
"epoch": 78.00640776699029,
"grad_norm": 0.0005725217051804066,
"learning_rate": 1.1866235167206041e-05,
"loss": 0.0001,
"step": 8100
},
{
"epoch": 78.0073786407767,
"grad_norm": 0.0004487498663365841,
"learning_rate": 1.1812297734627832e-05,
"loss": 0.0111,
"step": 8110
},
{
"epoch": 78.00834951456311,
"grad_norm": 0.001997926738113165,
"learning_rate": 1.1758360302049623e-05,
"loss": 0.0,
"step": 8120
},
{
"epoch": 78.00932038834951,
"grad_norm": 0.0003361359122209251,
"learning_rate": 1.1704422869471414e-05,
"loss": 0.1455,
"step": 8130
},
{
"epoch": 78.01,
"eval_accuracy": 0.75,
"eval_loss": 1.4418976306915283,
"eval_runtime": 24.492,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 0.572,
"step": 8137
},
{
"epoch": 79.00029126213592,
"grad_norm": 0.001469839597120881,
"learning_rate": 1.1650485436893204e-05,
"loss": 0.0577,
"step": 8140
},
{
"epoch": 79.00126213592233,
"grad_norm": 0.0005355125176720321,
"learning_rate": 1.1596548004314995e-05,
"loss": 0.0001,
"step": 8150
},
{
"epoch": 79.00223300970873,
"grad_norm": 0.0054461769759655,
"learning_rate": 1.1542610571736785e-05,
"loss": 0.1018,
"step": 8160
},
{
"epoch": 79.00320388349515,
"grad_norm": 0.0068475124426186085,
"learning_rate": 1.1488673139158575e-05,
"loss": 0.0,
"step": 8170
},
{
"epoch": 79.00417475728155,
"grad_norm": 0.0009039226570166647,
"learning_rate": 1.1434735706580366e-05,
"loss": 0.0,
"step": 8180
},
{
"epoch": 79.00514563106796,
"grad_norm": 0.0010757894488051534,
"learning_rate": 1.1380798274002157e-05,
"loss": 0.0003,
"step": 8190
},
{
"epoch": 79.00611650485438,
"grad_norm": 0.0009386289748363197,
"learning_rate": 1.1326860841423948e-05,
"loss": 0.0,
"step": 8200
},
{
"epoch": 79.00708737864078,
"grad_norm": 0.000584886409342289,
"learning_rate": 1.1272923408845739e-05,
"loss": 0.0001,
"step": 8210
},
{
"epoch": 79.00805825242719,
"grad_norm": 0.003581673838198185,
"learning_rate": 1.121898597626753e-05,
"loss": 0.0001,
"step": 8220
},
{
"epoch": 79.00902912621359,
"grad_norm": 0.0006209689890965819,
"learning_rate": 1.116504854368932e-05,
"loss": 0.0,
"step": 8230
},
{
"epoch": 79.01,
"grad_norm": 0.000511133810505271,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.0,
"step": 8240
},
{
"epoch": 79.01,
"eval_accuracy": 0.75,
"eval_loss": 1.364740252494812,
"eval_runtime": 22.1933,
"eval_samples_per_second": 1.262,
"eval_steps_per_second": 0.631,
"step": 8240
},
{
"epoch": 80.00097087378641,
"grad_norm": 0.0008854233892634511,
"learning_rate": 1.1057173678532903e-05,
"loss": 0.0003,
"step": 8250
},
{
"epoch": 80.00194174757281,
"grad_norm": 0.0014848372666165233,
"learning_rate": 1.1003236245954693e-05,
"loss": 0.0002,
"step": 8260
},
{
"epoch": 80.00291262135923,
"grad_norm": 0.0003331179323140532,
"learning_rate": 1.0949298813376484e-05,
"loss": 0.0009,
"step": 8270
},
{
"epoch": 80.00388349514563,
"grad_norm": 0.003774071577936411,
"learning_rate": 1.0895361380798275e-05,
"loss": 0.2251,
"step": 8280
},
{
"epoch": 80.00485436893204,
"grad_norm": 0.004081737250089645,
"learning_rate": 1.0841423948220066e-05,
"loss": 0.0022,
"step": 8290
},
{
"epoch": 80.00582524271844,
"grad_norm": 0.0017505354480817914,
"learning_rate": 1.0787486515641857e-05,
"loss": 0.2503,
"step": 8300
},
{
"epoch": 80.00679611650486,
"grad_norm": 0.0007621512049809098,
"learning_rate": 1.0733549083063648e-05,
"loss": 0.0006,
"step": 8310
},
{
"epoch": 80.00776699029126,
"grad_norm": 0.000708577164914459,
"learning_rate": 1.0679611650485437e-05,
"loss": 0.0,
"step": 8320
},
{
"epoch": 80.00873786407767,
"grad_norm": 0.0012766749132424593,
"learning_rate": 1.0625674217907228e-05,
"loss": 0.0152,
"step": 8330
},
{
"epoch": 80.00970873786407,
"grad_norm": 0.0017332998104393482,
"learning_rate": 1.0571736785329019e-05,
"loss": 0.0,
"step": 8340
},
{
"epoch": 80.01,
"eval_accuracy": 0.7857142857142857,
"eval_loss": 1.295793890953064,
"eval_runtime": 22.0647,
"eval_samples_per_second": 1.269,
"eval_steps_per_second": 0.634,
"step": 8343
},
{
"epoch": 81.00067961165048,
"grad_norm": 0.0007152267498895526,
"learning_rate": 1.051779935275081e-05,
"loss": 0.0001,
"step": 8350
},
{
"epoch": 81.0016504854369,
"grad_norm": 0.0007688335026614368,
"learning_rate": 1.04638619201726e-05,
"loss": 0.0,
"step": 8360
},
{
"epoch": 81.0026213592233,
"grad_norm": 0.000391281268093735,
"learning_rate": 1.0409924487594391e-05,
"loss": 0.0001,
"step": 8370
},
{
"epoch": 81.00359223300971,
"grad_norm": 0.0010772079695016146,
"learning_rate": 1.0355987055016182e-05,
"loss": 0.0,
"step": 8380
},
{
"epoch": 81.00456310679611,
"grad_norm": 0.0007571936585009098,
"learning_rate": 1.0302049622437973e-05,
"loss": 0.0,
"step": 8390
},
{
"epoch": 81.00553398058253,
"grad_norm": 0.004109963774681091,
"learning_rate": 1.0248112189859762e-05,
"loss": 0.1323,
"step": 8400
},
{
"epoch": 81.00650485436893,
"grad_norm": 0.002082277089357376,
"learning_rate": 1.0194174757281553e-05,
"loss": 0.0797,
"step": 8410
},
{
"epoch": 81.00747572815534,
"grad_norm": 0.0007720484863966703,
"learning_rate": 1.0140237324703344e-05,
"loss": 0.0001,
"step": 8420
},
{
"epoch": 81.00844660194174,
"grad_norm": 0.0007060917560011148,
"learning_rate": 1.0086299892125135e-05,
"loss": 0.0,
"step": 8430
},
{
"epoch": 81.00941747572816,
"grad_norm": 0.002525564981624484,
"learning_rate": 1.0032362459546926e-05,
"loss": 0.0005,
"step": 8440
},
{
"epoch": 81.01,
"eval_accuracy": 0.8214285714285714,
"eval_loss": 1.25356924533844,
"eval_runtime": 23.2168,
"eval_samples_per_second": 1.206,
"eval_steps_per_second": 0.603,
"step": 8446
},
{
"epoch": 82.00038834951457,
"grad_norm": 0.0004880694905295968,
"learning_rate": 9.978425026968717e-06,
"loss": 0.2605,
"step": 8450
},
{
"epoch": 82.00135922330097,
"grad_norm": 0.0019061871571466327,
"learning_rate": 9.924487594390508e-06,
"loss": 0.0002,
"step": 8460
},
{
"epoch": 82.00233009708738,
"grad_norm": 0.0003495465498417616,
"learning_rate": 9.870550161812297e-06,
"loss": 0.0,
"step": 8470
},
{
"epoch": 82.00330097087378,
"grad_norm": 0.0003973764833062887,
"learning_rate": 9.816612729234088e-06,
"loss": 0.0,
"step": 8480
},
{
"epoch": 82.0042718446602,
"grad_norm": 0.00119751354213804,
"learning_rate": 9.762675296655879e-06,
"loss": 0.0,
"step": 8490
},
{
"epoch": 82.0052427184466,
"grad_norm": 0.002419657539576292,
"learning_rate": 9.70873786407767e-06,
"loss": 0.0003,
"step": 8500
},
{
"epoch": 82.00621359223301,
"grad_norm": 0.0020702662877738476,
"learning_rate": 9.65480043149946e-06,
"loss": 0.0004,
"step": 8510
},
{
"epoch": 82.00718446601942,
"grad_norm": 0.0005920990370213985,
"learning_rate": 9.600862998921251e-06,
"loss": 0.0001,
"step": 8520
},
{
"epoch": 82.00815533980582,
"grad_norm": 0.000470437400508672,
"learning_rate": 9.546925566343042e-06,
"loss": 0.0,
"step": 8530
},
{
"epoch": 82.00912621359224,
"grad_norm": 0.00858411192893982,
"learning_rate": 9.492988133764833e-06,
"loss": 0.0411,
"step": 8540
},
{
"epoch": 82.01,
"eval_accuracy": 0.8571428571428571,
"eval_loss": 1.2219018936157227,
"eval_runtime": 20.1232,
"eval_samples_per_second": 1.391,
"eval_steps_per_second": 0.696,
"step": 8549
},
{
"epoch": 83.00009708737863,
"grad_norm": 0.0005051534972153604,
"learning_rate": 9.439050701186624e-06,
"loss": 0.0,
"step": 8550
},
{
"epoch": 83.00106796116505,
"grad_norm": 0.0038792623672634363,
"learning_rate": 9.385113268608415e-06,
"loss": 0.0018,
"step": 8560
},
{
"epoch": 83.00203883495146,
"grad_norm": 0.010000643320381641,
"learning_rate": 9.331175836030206e-06,
"loss": 0.0,
"step": 8570
},
{
"epoch": 83.00300970873786,
"grad_norm": 0.0011511164484545588,
"learning_rate": 9.277238403451997e-06,
"loss": 0.0004,
"step": 8580
},
{
"epoch": 83.00398058252428,
"grad_norm": 0.0007623996352776885,
"learning_rate": 9.223300970873788e-06,
"loss": 0.0,
"step": 8590
},
{
"epoch": 83.00495145631068,
"grad_norm": 37.7863883972168,
"learning_rate": 9.169363538295578e-06,
"loss": 0.0437,
"step": 8600
},
{
"epoch": 83.00592233009709,
"grad_norm": 0.001496079028584063,
"learning_rate": 9.11542610571737e-06,
"loss": 0.0001,
"step": 8610
},
{
"epoch": 83.0068932038835,
"grad_norm": 0.0007690541679039598,
"learning_rate": 9.06148867313916e-06,
"loss": 0.0,
"step": 8620
},
{
"epoch": 83.00786407766991,
"grad_norm": 0.0005045629222877324,
"learning_rate": 9.00755124056095e-06,
"loss": 0.0591,
"step": 8630
},
{
"epoch": 83.00883495145631,
"grad_norm": 0.0007508267881348729,
"learning_rate": 8.95361380798274e-06,
"loss": 0.0051,
"step": 8640
},
{
"epoch": 83.00980582524272,
"grad_norm": 0.0006566194351762533,
"learning_rate": 8.899676375404531e-06,
"loss": 0.0,
"step": 8650
},
{
"epoch": 83.01,
"eval_accuracy": 0.8928571428571429,
"eval_loss": 0.7256796956062317,
"eval_runtime": 20.7837,
"eval_samples_per_second": 1.347,
"eval_steps_per_second": 0.674,
"step": 8652
},
{
"epoch": 84.00077669902913,
"grad_norm": 0.0004185517900623381,
"learning_rate": 8.845738942826322e-06,
"loss": 0.0,
"step": 8660
},
{
"epoch": 84.00174757281553,
"grad_norm": 14.310250282287598,
"learning_rate": 8.791801510248113e-06,
"loss": 0.0213,
"step": 8670
},
{
"epoch": 84.00271844660195,
"grad_norm": 0.00037505649379454553,
"learning_rate": 8.737864077669904e-06,
"loss": 0.0717,
"step": 8680
},
{
"epoch": 84.00368932038835,
"grad_norm": 0.0026767821982502937,
"learning_rate": 8.683926645091695e-06,
"loss": 0.2151,
"step": 8690
},
{
"epoch": 84.00466019417476,
"grad_norm": 0.001046729856170714,
"learning_rate": 8.629989212513486e-06,
"loss": 0.0,
"step": 8700
},
{
"epoch": 84.00563106796116,
"grad_norm": 0.003536473959684372,
"learning_rate": 8.576051779935275e-06,
"loss": 0.1663,
"step": 8710
},
{
"epoch": 84.00660194174758,
"grad_norm": 0.0006385446176864207,
"learning_rate": 8.522114347357066e-06,
"loss": 0.1303,
"step": 8720
},
{
"epoch": 84.00757281553398,
"grad_norm": 0.023310404270887375,
"learning_rate": 8.468176914778857e-06,
"loss": 0.0184,
"step": 8730
},
{
"epoch": 84.00854368932039,
"grad_norm": 0.0003330286417622119,
"learning_rate": 8.414239482200647e-06,
"loss": 0.0018,
"step": 8740
},
{
"epoch": 84.00951456310679,
"grad_norm": 0.0005678732413798571,
"learning_rate": 8.360302049622438e-06,
"loss": 0.0,
"step": 8750
},
{
"epoch": 84.01,
"eval_accuracy": 0.9285714285714286,
"eval_loss": 0.6211844086647034,
"eval_runtime": 20.312,
"eval_samples_per_second": 1.378,
"eval_steps_per_second": 0.689,
"step": 8755
}
],
"logging_steps": 10,
"max_steps": 10300,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 4.486456305987748e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}