{
"best_metric": 0.72751677852349,
"best_model_checkpoint": "vivit-b-16x2-kinetics400-finetuned-crema-d/checkpoint-2976",
"epoch": 3.248991935483871,
"eval_steps": 500,
"global_step": 2976,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 8.909350395202637,
"learning_rate": 1.6778523489932886e-06,
"loss": 1.8749,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 10.488879203796387,
"learning_rate": 3.3557046979865773e-06,
"loss": 1.8028,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 9.420026779174805,
"learning_rate": 5.033557046979865e-06,
"loss": 1.7855,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 8.927688598632812,
"learning_rate": 6.7114093959731546e-06,
"loss": 1.8411,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 10.278192520141602,
"learning_rate": 8.389261744966444e-06,
"loss": 1.7949,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 8.889835357666016,
"learning_rate": 1.006711409395973e-05,
"loss": 1.7569,
"step": 60
},
{
"epoch": 0.02,
"grad_norm": 8.13683795928955,
"learning_rate": 1.174496644295302e-05,
"loss": 1.677,
"step": 70
},
{
"epoch": 0.03,
"grad_norm": 12.050573348999023,
"learning_rate": 1.3422818791946309e-05,
"loss": 1.6716,
"step": 80
},
{
"epoch": 0.03,
"grad_norm": 12.406548500061035,
"learning_rate": 1.51006711409396e-05,
"loss": 1.6021,
"step": 90
},
{
"epoch": 0.03,
"grad_norm": 9.213874816894531,
"learning_rate": 1.6778523489932888e-05,
"loss": 1.5515,
"step": 100
},
{
"epoch": 0.04,
"grad_norm": 7.81805419921875,
"learning_rate": 1.8456375838926178e-05,
"loss": 1.4358,
"step": 110
},
{
"epoch": 0.04,
"grad_norm": 7.475973606109619,
"learning_rate": 2.013422818791946e-05,
"loss": 1.4428,
"step": 120
},
{
"epoch": 0.04,
"grad_norm": 8.327278137207031,
"learning_rate": 2.181208053691275e-05,
"loss": 1.5263,
"step": 130
},
{
"epoch": 0.05,
"grad_norm": 8.09311580657959,
"learning_rate": 2.348993288590604e-05,
"loss": 1.3745,
"step": 140
},
{
"epoch": 0.05,
"grad_norm": 11.201431274414062,
"learning_rate": 2.516778523489933e-05,
"loss": 1.2682,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 10.589693069458008,
"learning_rate": 2.6845637583892618e-05,
"loss": 1.2924,
"step": 160
},
{
"epoch": 0.06,
"grad_norm": 12.334148406982422,
"learning_rate": 2.8523489932885905e-05,
"loss": 1.3283,
"step": 170
},
{
"epoch": 0.06,
"grad_norm": 11.779102325439453,
"learning_rate": 3.02013422818792e-05,
"loss": 1.5154,
"step": 180
},
{
"epoch": 0.06,
"grad_norm": 7.225616455078125,
"learning_rate": 3.1879194630872485e-05,
"loss": 1.3195,
"step": 190
},
{
"epoch": 0.07,
"grad_norm": 10.551006317138672,
"learning_rate": 3.3557046979865775e-05,
"loss": 1.3953,
"step": 200
},
{
"epoch": 0.07,
"grad_norm": 9.995854377746582,
"learning_rate": 3.523489932885906e-05,
"loss": 1.1524,
"step": 210
},
{
"epoch": 0.07,
"grad_norm": 8.858394622802734,
"learning_rate": 3.6912751677852356e-05,
"loss": 1.1582,
"step": 220
},
{
"epoch": 0.08,
"grad_norm": 14.140151977539062,
"learning_rate": 3.859060402684564e-05,
"loss": 1.2541,
"step": 230
},
{
"epoch": 0.08,
"grad_norm": 12.274311065673828,
"learning_rate": 4.026845637583892e-05,
"loss": 1.2666,
"step": 240
},
{
"epoch": 0.08,
"grad_norm": 9.257830619812012,
"learning_rate": 4.194630872483222e-05,
"loss": 1.1348,
"step": 250
},
{
"epoch": 0.09,
"grad_norm": 8.932195663452148,
"learning_rate": 4.36241610738255e-05,
"loss": 1.2301,
"step": 260
},
{
"epoch": 0.09,
"grad_norm": 8.28331470489502,
"learning_rate": 4.530201342281879e-05,
"loss": 1.15,
"step": 270
},
{
"epoch": 0.09,
"grad_norm": 14.461810111999512,
"learning_rate": 4.697986577181208e-05,
"loss": 1.1571,
"step": 280
},
{
"epoch": 0.1,
"grad_norm": 17.485107421875,
"learning_rate": 4.865771812080537e-05,
"loss": 1.0857,
"step": 290
},
{
"epoch": 0.1,
"grad_norm": 7.844751834869385,
"learning_rate": 4.996265870052278e-05,
"loss": 1.2185,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 9.986318588256836,
"learning_rate": 4.977595220313667e-05,
"loss": 1.2979,
"step": 310
},
{
"epoch": 0.11,
"grad_norm": 7.930599689483643,
"learning_rate": 4.958924570575056e-05,
"loss": 1.1913,
"step": 320
},
{
"epoch": 0.11,
"grad_norm": 14.08823013305664,
"learning_rate": 4.9402539208364454e-05,
"loss": 1.0968,
"step": 330
},
{
"epoch": 0.11,
"grad_norm": 9.71191120147705,
"learning_rate": 4.9215832710978346e-05,
"loss": 1.0469,
"step": 340
},
{
"epoch": 0.12,
"grad_norm": 11.69392204284668,
"learning_rate": 4.902912621359224e-05,
"loss": 1.152,
"step": 350
},
{
"epoch": 0.12,
"grad_norm": 12.762391090393066,
"learning_rate": 4.884241971620613e-05,
"loss": 1.3083,
"step": 360
},
{
"epoch": 0.12,
"grad_norm": 9.314635276794434,
"learning_rate": 4.8655713218820016e-05,
"loss": 1.0477,
"step": 370
},
{
"epoch": 0.13,
"grad_norm": 10.564154624938965,
"learning_rate": 4.846900672143391e-05,
"loss": 1.0316,
"step": 380
},
{
"epoch": 0.13,
"grad_norm": 14.425047874450684,
"learning_rate": 4.82823002240478e-05,
"loss": 1.1457,
"step": 390
},
{
"epoch": 0.13,
"grad_norm": 11.951525688171387,
"learning_rate": 4.809559372666169e-05,
"loss": 1.1723,
"step": 400
},
{
"epoch": 0.14,
"grad_norm": 8.284050941467285,
"learning_rate": 4.790888722927558e-05,
"loss": 1.0312,
"step": 410
},
{
"epoch": 0.14,
"grad_norm": 9.24317455291748,
"learning_rate": 4.772218073188947e-05,
"loss": 1.2024,
"step": 420
},
{
"epoch": 0.14,
"grad_norm": 5.5853590965271,
"learning_rate": 4.753547423450336e-05,
"loss": 0.9976,
"step": 430
},
{
"epoch": 0.15,
"grad_norm": 8.923686027526855,
"learning_rate": 4.7348767737117256e-05,
"loss": 1.0393,
"step": 440
},
{
"epoch": 0.15,
"grad_norm": 5.833179950714111,
"learning_rate": 4.716206123973114e-05,
"loss": 0.8951,
"step": 450
},
{
"epoch": 0.15,
"grad_norm": 8.868228912353516,
"learning_rate": 4.697535474234503e-05,
"loss": 0.997,
"step": 460
},
{
"epoch": 0.16,
"grad_norm": 9.04990005493164,
"learning_rate": 4.6788648244958926e-05,
"loss": 0.8222,
"step": 470
},
{
"epoch": 0.16,
"grad_norm": 7.88612699508667,
"learning_rate": 4.660194174757282e-05,
"loss": 1.2196,
"step": 480
},
{
"epoch": 0.16,
"grad_norm": 9.290902137756348,
"learning_rate": 4.6415235250186703e-05,
"loss": 1.1256,
"step": 490
},
{
"epoch": 0.17,
"grad_norm": 6.916901111602783,
"learning_rate": 4.6228528752800596e-05,
"loss": 1.2263,
"step": 500
},
{
"epoch": 0.17,
"grad_norm": 4.902709484100342,
"learning_rate": 4.604182225541449e-05,
"loss": 0.9711,
"step": 510
},
{
"epoch": 0.17,
"grad_norm": 9.100408554077148,
"learning_rate": 4.585511575802838e-05,
"loss": 1.3256,
"step": 520
},
{
"epoch": 0.18,
"grad_norm": 8.152189254760742,
"learning_rate": 4.566840926064227e-05,
"loss": 0.9898,
"step": 530
},
{
"epoch": 0.18,
"grad_norm": 9.004831314086914,
"learning_rate": 4.5481702763256165e-05,
"loss": 0.9567,
"step": 540
},
{
"epoch": 0.18,
"grad_norm": 10.926904678344727,
"learning_rate": 4.529499626587006e-05,
"loss": 1.1919,
"step": 550
},
{
"epoch": 0.19,
"grad_norm": 11.16217041015625,
"learning_rate": 4.510828976848395e-05,
"loss": 0.9503,
"step": 560
},
{
"epoch": 0.19,
"grad_norm": 8.947577476501465,
"learning_rate": 4.492158327109784e-05,
"loss": 1.1709,
"step": 570
},
{
"epoch": 0.19,
"grad_norm": 17.401987075805664,
"learning_rate": 4.473487677371173e-05,
"loss": 1.1109,
"step": 580
},
{
"epoch": 0.2,
"grad_norm": 4.542303085327148,
"learning_rate": 4.454817027632562e-05,
"loss": 1.0982,
"step": 590
},
{
"epoch": 0.2,
"grad_norm": 11.068498611450195,
"learning_rate": 4.436146377893951e-05,
"loss": 0.8818,
"step": 600
},
{
"epoch": 0.2,
"grad_norm": 8.869817733764648,
"learning_rate": 4.4174757281553404e-05,
"loss": 0.9708,
"step": 610
},
{
"epoch": 0.21,
"grad_norm": 11.837583541870117,
"learning_rate": 4.398805078416729e-05,
"loss": 0.912,
"step": 620
},
{
"epoch": 0.21,
"grad_norm": 10.15700626373291,
"learning_rate": 4.380134428678118e-05,
"loss": 1.0116,
"step": 630
},
{
"epoch": 0.22,
"grad_norm": 14.383340835571289,
"learning_rate": 4.3614637789395075e-05,
"loss": 0.9178,
"step": 640
},
{
"epoch": 0.22,
"grad_norm": 10.986087799072266,
"learning_rate": 4.342793129200897e-05,
"loss": 0.7005,
"step": 650
},
{
"epoch": 0.22,
"grad_norm": 5.080931186676025,
"learning_rate": 4.324122479462285e-05,
"loss": 0.834,
"step": 660
},
{
"epoch": 0.23,
"grad_norm": 11.886563301086426,
"learning_rate": 4.3054518297236745e-05,
"loss": 1.1476,
"step": 670
},
{
"epoch": 0.23,
"grad_norm": 6.172848224639893,
"learning_rate": 4.286781179985064e-05,
"loss": 0.8403,
"step": 680
},
{
"epoch": 0.23,
"grad_norm": 5.088493824005127,
"learning_rate": 4.268110530246453e-05,
"loss": 0.9338,
"step": 690
},
{
"epoch": 0.24,
"grad_norm": 9.035572052001953,
"learning_rate": 4.2494398805078415e-05,
"loss": 0.9069,
"step": 700
},
{
"epoch": 0.24,
"grad_norm": 6.618842124938965,
"learning_rate": 4.230769230769231e-05,
"loss": 0.8403,
"step": 710
},
{
"epoch": 0.24,
"grad_norm": 12.929986953735352,
"learning_rate": 4.21209858103062e-05,
"loss": 1.0611,
"step": 720
},
{
"epoch": 0.25,
"grad_norm": 6.48470401763916,
"learning_rate": 4.193427931292009e-05,
"loss": 0.9453,
"step": 730
},
{
"epoch": 0.25,
"grad_norm": 13.81301212310791,
"learning_rate": 4.1747572815533984e-05,
"loss": 0.9737,
"step": 740
},
{
"epoch": 0.25,
"eval_accuracy": 0.614765100671141,
"eval_loss": 1.0215864181518555,
"eval_runtime": 1824.1699,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.052,
"step": 745
},
{
"epoch": 1.0,
"grad_norm": 6.812247276306152,
"learning_rate": 4.1560866318147876e-05,
"loss": 0.7102,
"step": 750
},
{
"epoch": 1.01,
"grad_norm": 8.490981101989746,
"learning_rate": 4.137415982076177e-05,
"loss": 0.7269,
"step": 760
},
{
"epoch": 1.01,
"grad_norm": 14.003782272338867,
"learning_rate": 4.118745332337566e-05,
"loss": 0.7074,
"step": 770
},
{
"epoch": 1.01,
"grad_norm": 12.912096977233887,
"learning_rate": 4.1000746825989546e-05,
"loss": 1.0037,
"step": 780
},
{
"epoch": 1.02,
"grad_norm": 11.331485748291016,
"learning_rate": 4.081404032860344e-05,
"loss": 0.8032,
"step": 790
},
{
"epoch": 1.02,
"grad_norm": 7.185470104217529,
"learning_rate": 4.062733383121733e-05,
"loss": 0.9243,
"step": 800
},
{
"epoch": 1.02,
"grad_norm": 6.5612711906433105,
"learning_rate": 4.0440627333831223e-05,
"loss": 0.874,
"step": 810
},
{
"epoch": 1.03,
"grad_norm": 14.173837661743164,
"learning_rate": 4.025392083644511e-05,
"loss": 0.8649,
"step": 820
},
{
"epoch": 1.03,
"grad_norm": 8.195253372192383,
"learning_rate": 4.0067214339059e-05,
"loss": 0.7709,
"step": 830
},
{
"epoch": 1.03,
"grad_norm": 5.4088544845581055,
"learning_rate": 3.9880507841672894e-05,
"loss": 0.7699,
"step": 840
},
{
"epoch": 1.04,
"grad_norm": 6.817777633666992,
"learning_rate": 3.9693801344286786e-05,
"loss": 0.8736,
"step": 850
},
{
"epoch": 1.04,
"grad_norm": 8.318948745727539,
"learning_rate": 3.950709484690067e-05,
"loss": 0.6866,
"step": 860
},
{
"epoch": 1.04,
"grad_norm": 12.06480598449707,
"learning_rate": 3.9320388349514564e-05,
"loss": 0.8834,
"step": 870
},
{
"epoch": 1.05,
"grad_norm": 6.914089679718018,
"learning_rate": 3.9133681852128456e-05,
"loss": 0.9787,
"step": 880
},
{
"epoch": 1.05,
"grad_norm": 7.699115753173828,
"learning_rate": 3.894697535474235e-05,
"loss": 0.8338,
"step": 890
},
{
"epoch": 1.05,
"grad_norm": 4.82650089263916,
"learning_rate": 3.8760268857356234e-05,
"loss": 0.6263,
"step": 900
},
{
"epoch": 1.06,
"grad_norm": 8.42751407623291,
"learning_rate": 3.8573562359970126e-05,
"loss": 0.8408,
"step": 910
},
{
"epoch": 1.06,
"grad_norm": 5.994837760925293,
"learning_rate": 3.838685586258402e-05,
"loss": 0.8424,
"step": 920
},
{
"epoch": 1.06,
"grad_norm": 7.990606307983398,
"learning_rate": 3.820014936519791e-05,
"loss": 0.7187,
"step": 930
},
{
"epoch": 1.07,
"grad_norm": 10.63245964050293,
"learning_rate": 3.8013442867811796e-05,
"loss": 0.6456,
"step": 940
},
{
"epoch": 1.07,
"grad_norm": 12.09002685546875,
"learning_rate": 3.782673637042569e-05,
"loss": 0.8223,
"step": 950
},
{
"epoch": 1.07,
"grad_norm": 9.54681396484375,
"learning_rate": 3.764002987303958e-05,
"loss": 0.7054,
"step": 960
},
{
"epoch": 1.08,
"grad_norm": 13.245128631591797,
"learning_rate": 3.745332337565347e-05,
"loss": 0.9493,
"step": 970
},
{
"epoch": 1.08,
"grad_norm": 4.244826793670654,
"learning_rate": 3.7266616878267365e-05,
"loss": 0.8649,
"step": 980
},
{
"epoch": 1.08,
"grad_norm": 10.754876136779785,
"learning_rate": 3.707991038088126e-05,
"loss": 0.8481,
"step": 990
},
{
"epoch": 1.09,
"grad_norm": 11.53078842163086,
"learning_rate": 3.689320388349515e-05,
"loss": 0.9808,
"step": 1000
},
{
"epoch": 1.09,
"grad_norm": 12.149744033813477,
"learning_rate": 3.670649738610904e-05,
"loss": 0.8797,
"step": 1010
},
{
"epoch": 1.09,
"grad_norm": 7.940331935882568,
"learning_rate": 3.651979088872293e-05,
"loss": 0.8612,
"step": 1020
},
{
"epoch": 1.1,
"grad_norm": 10.588911056518555,
"learning_rate": 3.633308439133682e-05,
"loss": 0.7769,
"step": 1030
},
{
"epoch": 1.1,
"grad_norm": 16.114530563354492,
"learning_rate": 3.614637789395071e-05,
"loss": 0.7163,
"step": 1040
},
{
"epoch": 1.1,
"grad_norm": 11.30330753326416,
"learning_rate": 3.5959671396564605e-05,
"loss": 0.7857,
"step": 1050
},
{
"epoch": 1.11,
"grad_norm": 4.0768561363220215,
"learning_rate": 3.577296489917849e-05,
"loss": 0.6938,
"step": 1060
},
{
"epoch": 1.11,
"grad_norm": 7.4695143699646,
"learning_rate": 3.558625840179238e-05,
"loss": 0.8988,
"step": 1070
},
{
"epoch": 1.11,
"grad_norm": 11.519210815429688,
"learning_rate": 3.5399551904406275e-05,
"loss": 0.8402,
"step": 1080
},
{
"epoch": 1.12,
"grad_norm": 15.348576545715332,
"learning_rate": 3.521284540702017e-05,
"loss": 0.7148,
"step": 1090
},
{
"epoch": 1.12,
"grad_norm": 8.274968147277832,
"learning_rate": 3.502613890963405e-05,
"loss": 0.7931,
"step": 1100
},
{
"epoch": 1.12,
"grad_norm": 18.91743278503418,
"learning_rate": 3.4839432412247945e-05,
"loss": 0.6662,
"step": 1110
},
{
"epoch": 1.13,
"grad_norm": 7.221806049346924,
"learning_rate": 3.465272591486184e-05,
"loss": 0.6487,
"step": 1120
},
{
"epoch": 1.13,
"grad_norm": 6.4034013748168945,
"learning_rate": 3.446601941747573e-05,
"loss": 0.6152,
"step": 1130
},
{
"epoch": 1.13,
"grad_norm": 8.308573722839355,
"learning_rate": 3.427931292008962e-05,
"loss": 0.8798,
"step": 1140
},
{
"epoch": 1.14,
"grad_norm": 7.0101213455200195,
"learning_rate": 3.409260642270351e-05,
"loss": 0.8473,
"step": 1150
},
{
"epoch": 1.14,
"grad_norm": 9.986639976501465,
"learning_rate": 3.39058999253174e-05,
"loss": 0.9895,
"step": 1160
},
{
"epoch": 1.14,
"grad_norm": 3.4603047370910645,
"learning_rate": 3.371919342793129e-05,
"loss": 0.7945,
"step": 1170
},
{
"epoch": 1.15,
"grad_norm": 9.439742088317871,
"learning_rate": 3.3532486930545184e-05,
"loss": 0.8037,
"step": 1180
},
{
"epoch": 1.15,
"grad_norm": 9.237773895263672,
"learning_rate": 3.334578043315908e-05,
"loss": 0.7623,
"step": 1190
},
{
"epoch": 1.15,
"grad_norm": 10.11063289642334,
"learning_rate": 3.315907393577297e-05,
"loss": 0.6762,
"step": 1200
},
{
"epoch": 1.16,
"grad_norm": 14.077370643615723,
"learning_rate": 3.297236743838686e-05,
"loss": 0.6943,
"step": 1210
},
{
"epoch": 1.16,
"grad_norm": 8.673574447631836,
"learning_rate": 3.2785660941000754e-05,
"loss": 0.6585,
"step": 1220
},
{
"epoch": 1.16,
"grad_norm": 10.235486030578613,
"learning_rate": 3.259895444361464e-05,
"loss": 0.6866,
"step": 1230
},
{
"epoch": 1.17,
"grad_norm": 12.640578269958496,
"learning_rate": 3.241224794622853e-05,
"loss": 0.7851,
"step": 1240
},
{
"epoch": 1.17,
"grad_norm": 6.836172580718994,
"learning_rate": 3.2225541448842424e-05,
"loss": 0.8098,
"step": 1250
},
{
"epoch": 1.17,
"grad_norm": 11.899585723876953,
"learning_rate": 3.2038834951456316e-05,
"loss": 0.9768,
"step": 1260
},
{
"epoch": 1.18,
"grad_norm": 9.843255043029785,
"learning_rate": 3.18521284540702e-05,
"loss": 0.5619,
"step": 1270
},
{
"epoch": 1.18,
"grad_norm": 7.826408863067627,
"learning_rate": 3.1665421956684094e-05,
"loss": 0.7163,
"step": 1280
},
{
"epoch": 1.18,
"grad_norm": 5.7413482666015625,
"learning_rate": 3.1478715459297986e-05,
"loss": 0.5594,
"step": 1290
},
{
"epoch": 1.19,
"grad_norm": 9.495389938354492,
"learning_rate": 3.129200896191188e-05,
"loss": 0.6122,
"step": 1300
},
{
"epoch": 1.19,
"grad_norm": 9.183694839477539,
"learning_rate": 3.1105302464525764e-05,
"loss": 0.7087,
"step": 1310
},
{
"epoch": 1.19,
"grad_norm": 2.8270673751831055,
"learning_rate": 3.0918595967139656e-05,
"loss": 0.6698,
"step": 1320
},
{
"epoch": 1.2,
"grad_norm": 12.440056800842285,
"learning_rate": 3.073188946975355e-05,
"loss": 0.5935,
"step": 1330
},
{
"epoch": 1.2,
"grad_norm": 10.87549114227295,
"learning_rate": 3.054518297236744e-05,
"loss": 0.756,
"step": 1340
},
{
"epoch": 1.2,
"grad_norm": 10.946492195129395,
"learning_rate": 3.035847647498133e-05,
"loss": 0.8023,
"step": 1350
},
{
"epoch": 1.21,
"grad_norm": 10.33581829071045,
"learning_rate": 3.0171769977595222e-05,
"loss": 0.672,
"step": 1360
},
{
"epoch": 1.21,
"grad_norm": 3.581252336502075,
"learning_rate": 2.9985063480209115e-05,
"loss": 0.6321,
"step": 1370
},
{
"epoch": 1.21,
"grad_norm": 6.161603927612305,
"learning_rate": 2.9798356982823007e-05,
"loss": 0.7259,
"step": 1380
},
{
"epoch": 1.22,
"grad_norm": 6.92000675201416,
"learning_rate": 2.9611650485436892e-05,
"loss": 0.4861,
"step": 1390
},
{
"epoch": 1.22,
"grad_norm": 10.259751319885254,
"learning_rate": 2.9424943988050785e-05,
"loss": 0.5506,
"step": 1400
},
{
"epoch": 1.22,
"grad_norm": 7.796722888946533,
"learning_rate": 2.9238237490664677e-05,
"loss": 0.7225,
"step": 1410
},
{
"epoch": 1.23,
"grad_norm": 10.522936820983887,
"learning_rate": 2.905153099327857e-05,
"loss": 0.6722,
"step": 1420
},
{
"epoch": 1.23,
"grad_norm": 8.046516418457031,
"learning_rate": 2.8864824495892455e-05,
"loss": 0.7846,
"step": 1430
},
{
"epoch": 1.23,
"grad_norm": 7.680605411529541,
"learning_rate": 2.8678117998506347e-05,
"loss": 0.6704,
"step": 1440
},
{
"epoch": 1.24,
"grad_norm": 14.043888092041016,
"learning_rate": 2.849141150112024e-05,
"loss": 0.6614,
"step": 1450
},
{
"epoch": 1.24,
"grad_norm": 11.873641967773438,
"learning_rate": 2.8304705003734132e-05,
"loss": 0.5319,
"step": 1460
},
{
"epoch": 1.24,
"grad_norm": 18.787263870239258,
"learning_rate": 2.811799850634802e-05,
"loss": 0.6216,
"step": 1470
},
{
"epoch": 1.25,
"grad_norm": 9.360286712646484,
"learning_rate": 2.7931292008961913e-05,
"loss": 0.5028,
"step": 1480
},
{
"epoch": 1.25,
"grad_norm": 49.45521926879883,
"learning_rate": 2.7744585511575805e-05,
"loss": 0.9205,
"step": 1490
},
{
"epoch": 1.25,
"eval_accuracy": 0.6510067114093959,
"eval_loss": 0.923988938331604,
"eval_runtime": 1688.0484,
"eval_samples_per_second": 0.441,
"eval_steps_per_second": 0.056,
"step": 1490
},
{
"epoch": 2.0,
"grad_norm": 11.202278137207031,
"learning_rate": 2.7557879014189698e-05,
"loss": 0.5834,
"step": 1500
},
{
"epoch": 2.01,
"grad_norm": 9.430272102355957,
"learning_rate": 2.7371172516803583e-05,
"loss": 0.5599,
"step": 1510
},
{
"epoch": 2.01,
"grad_norm": 13.628783226013184,
"learning_rate": 2.7184466019417475e-05,
"loss": 0.5126,
"step": 1520
},
{
"epoch": 2.01,
"grad_norm": 6.143797397613525,
"learning_rate": 2.6997759522031368e-05,
"loss": 0.5101,
"step": 1530
},
{
"epoch": 2.02,
"grad_norm": 9.375954627990723,
"learning_rate": 2.681105302464526e-05,
"loss": 0.488,
"step": 1540
},
{
"epoch": 2.02,
"grad_norm": 12.77665901184082,
"learning_rate": 2.662434652725915e-05,
"loss": 0.6865,
"step": 1550
},
{
"epoch": 2.02,
"grad_norm": 11.217716217041016,
"learning_rate": 2.643764002987304e-05,
"loss": 0.5748,
"step": 1560
},
{
"epoch": 2.03,
"grad_norm": 13.564386367797852,
"learning_rate": 2.6250933532486934e-05,
"loss": 0.6002,
"step": 1570
},
{
"epoch": 2.03,
"grad_norm": 9.264657020568848,
"learning_rate": 2.6064227035100826e-05,
"loss": 0.615,
"step": 1580
},
{
"epoch": 2.03,
"grad_norm": 15.021964073181152,
"learning_rate": 2.587752053771471e-05,
"loss": 0.7611,
"step": 1590
},
{
"epoch": 2.04,
"grad_norm": 8.8477783203125,
"learning_rate": 2.5690814040328604e-05,
"loss": 0.5559,
"step": 1600
},
{
"epoch": 2.04,
"grad_norm": 12.896989822387695,
"learning_rate": 2.5504107542942496e-05,
"loss": 0.5796,
"step": 1610
},
{
"epoch": 2.04,
"grad_norm": 5.042093276977539,
"learning_rate": 2.531740104555639e-05,
"loss": 0.632,
"step": 1620
},
{
"epoch": 2.05,
"grad_norm": 9.28765869140625,
"learning_rate": 2.5130694548170274e-05,
"loss": 0.6429,
"step": 1630
},
{
"epoch": 2.05,
"grad_norm": 13.730548858642578,
"learning_rate": 2.4943988050784166e-05,
"loss": 0.6212,
"step": 1640
},
{
"epoch": 2.05,
"grad_norm": 5.59623908996582,
"learning_rate": 2.475728155339806e-05,
"loss": 0.5832,
"step": 1650
},
{
"epoch": 2.06,
"grad_norm": 6.121994972229004,
"learning_rate": 2.4570575056011947e-05,
"loss": 0.5236,
"step": 1660
},
{
"epoch": 2.06,
"grad_norm": 14.531633377075195,
"learning_rate": 2.4383868558625843e-05,
"loss": 0.5765,
"step": 1670
},
{
"epoch": 2.06,
"grad_norm": 6.720921993255615,
"learning_rate": 2.4197162061239732e-05,
"loss": 0.6104,
"step": 1680
},
{
"epoch": 2.07,
"grad_norm": 6.224832057952881,
"learning_rate": 2.4010455563853624e-05,
"loss": 0.6797,
"step": 1690
},
{
"epoch": 2.07,
"grad_norm": 10.963885307312012,
"learning_rate": 2.3823749066467517e-05,
"loss": 0.579,
"step": 1700
},
{
"epoch": 2.07,
"grad_norm": 9.707019805908203,
"learning_rate": 2.3637042569081406e-05,
"loss": 0.5891,
"step": 1710
},
{
"epoch": 2.08,
"grad_norm": 12.264431953430176,
"learning_rate": 2.3450336071695298e-05,
"loss": 0.6558,
"step": 1720
},
{
"epoch": 2.08,
"grad_norm": 9.180527687072754,
"learning_rate": 2.3263629574309187e-05,
"loss": 0.5394,
"step": 1730
},
{
"epoch": 2.08,
"grad_norm": 10.08325481414795,
"learning_rate": 2.307692307692308e-05,
"loss": 0.6894,
"step": 1740
},
{
"epoch": 2.09,
"grad_norm": 7.65585994720459,
"learning_rate": 2.2890216579536968e-05,
"loss": 0.4734,
"step": 1750
},
{
"epoch": 2.09,
"grad_norm": 6.85057258605957,
"learning_rate": 2.270351008215086e-05,
"loss": 0.5201,
"step": 1760
},
{
"epoch": 2.09,
"grad_norm": 9.979317665100098,
"learning_rate": 2.251680358476475e-05,
"loss": 0.5334,
"step": 1770
},
{
"epoch": 2.1,
"grad_norm": 7.880558013916016,
"learning_rate": 2.233009708737864e-05,
"loss": 0.6427,
"step": 1780
},
{
"epoch": 2.1,
"grad_norm": 7.206289291381836,
"learning_rate": 2.2143390589992534e-05,
"loss": 0.7938,
"step": 1790
},
{
"epoch": 2.1,
"grad_norm": 4.514979839324951,
"learning_rate": 2.1956684092606426e-05,
"loss": 0.4731,
"step": 1800
},
{
"epoch": 2.11,
"grad_norm": 5.672696590423584,
"learning_rate": 2.1769977595220315e-05,
"loss": 0.5509,
"step": 1810
},
{
"epoch": 2.11,
"grad_norm": 12.793485641479492,
"learning_rate": 2.1583271097834207e-05,
"loss": 0.6027,
"step": 1820
},
{
"epoch": 2.11,
"grad_norm": 12.888551712036133,
"learning_rate": 2.1396564600448096e-05,
"loss": 0.6495,
"step": 1830
},
{
"epoch": 2.12,
"grad_norm": 8.044211387634277,
"learning_rate": 2.120985810306199e-05,
"loss": 0.5304,
"step": 1840
},
{
"epoch": 2.12,
"grad_norm": 11.73928451538086,
"learning_rate": 2.1023151605675877e-05,
"loss": 0.6466,
"step": 1850
},
{
"epoch": 2.12,
"grad_norm": 9.793115615844727,
"learning_rate": 2.083644510828977e-05,
"loss": 0.6457,
"step": 1860
},
{
"epoch": 2.13,
"grad_norm": 15.4037504196167,
"learning_rate": 2.064973861090366e-05,
"loss": 0.6134,
"step": 1870
},
{
"epoch": 2.13,
"grad_norm": 8.58609390258789,
"learning_rate": 2.046303211351755e-05,
"loss": 0.6056,
"step": 1880
},
{
"epoch": 2.13,
"grad_norm": 14.316490173339844,
"learning_rate": 2.0276325616131443e-05,
"loss": 0.6665,
"step": 1890
},
{
"epoch": 2.14,
"grad_norm": 15.085420608520508,
"learning_rate": 2.0089619118745336e-05,
"loss": 0.6414,
"step": 1900
},
{
"epoch": 2.14,
"grad_norm": 10.707281112670898,
"learning_rate": 1.9902912621359225e-05,
"loss": 0.7794,
"step": 1910
},
{
"epoch": 2.14,
"grad_norm": 13.632383346557617,
"learning_rate": 1.9716206123973117e-05,
"loss": 0.534,
"step": 1920
},
{
"epoch": 2.15,
"grad_norm": 8.533977508544922,
"learning_rate": 1.9529499626587006e-05,
"loss": 0.549,
"step": 1930
},
{
"epoch": 2.15,
"grad_norm": 10.36202335357666,
"learning_rate": 1.9342793129200898e-05,
"loss": 0.6243,
"step": 1940
},
{
"epoch": 2.15,
"grad_norm": 9.732759475708008,
"learning_rate": 1.9156086631814787e-05,
"loss": 0.5558,
"step": 1950
},
{
"epoch": 2.16,
"grad_norm": 11.463001251220703,
"learning_rate": 1.896938013442868e-05,
"loss": 0.608,
"step": 1960
},
{
"epoch": 2.16,
"grad_norm": 12.787510871887207,
"learning_rate": 1.8782673637042568e-05,
"loss": 0.4653,
"step": 1970
},
{
"epoch": 2.16,
"grad_norm": 2.2048239707946777,
"learning_rate": 1.859596713965646e-05,
"loss": 0.4185,
"step": 1980
},
{
"epoch": 2.17,
"grad_norm": 14.132601737976074,
"learning_rate": 1.8409260642270353e-05,
"loss": 0.5964,
"step": 1990
},
{
"epoch": 2.17,
"grad_norm": 13.3015775680542,
"learning_rate": 1.8222554144884245e-05,
"loss": 0.5275,
"step": 2000
},
{
"epoch": 2.17,
"grad_norm": 9.547086715698242,
"learning_rate": 1.8035847647498134e-05,
"loss": 0.4277,
"step": 2010
},
{
"epoch": 2.18,
"grad_norm": 4.934826374053955,
"learning_rate": 1.7849141150112026e-05,
"loss": 0.5148,
"step": 2020
},
{
"epoch": 2.18,
"grad_norm": 8.04011344909668,
"learning_rate": 1.7662434652725915e-05,
"loss": 0.5389,
"step": 2030
},
{
"epoch": 2.18,
"grad_norm": 6.342478275299072,
"learning_rate": 1.7475728155339808e-05,
"loss": 0.6957,
"step": 2040
},
{
"epoch": 2.19,
"grad_norm": 8.864142417907715,
"learning_rate": 1.7289021657953697e-05,
"loss": 0.4428,
"step": 2050
},
{
"epoch": 2.19,
"grad_norm": 5.753154277801514,
"learning_rate": 1.710231516056759e-05,
"loss": 0.3995,
"step": 2060
},
{
"epoch": 2.19,
"grad_norm": 5.699671745300293,
"learning_rate": 1.6915608663181478e-05,
"loss": 0.4798,
"step": 2070
},
{
"epoch": 2.2,
"grad_norm": 10.056828498840332,
"learning_rate": 1.672890216579537e-05,
"loss": 0.6173,
"step": 2080
},
{
"epoch": 2.2,
"grad_norm": 6.651643753051758,
"learning_rate": 1.654219566840926e-05,
"loss": 0.4855,
"step": 2090
},
{
"epoch": 2.2,
"grad_norm": 10.440901756286621,
"learning_rate": 1.635548917102315e-05,
"loss": 0.5887,
"step": 2100
},
{
"epoch": 2.21,
"grad_norm": 5.655547618865967,
"learning_rate": 1.6168782673637044e-05,
"loss": 0.5435,
"step": 2110
},
{
"epoch": 2.21,
"grad_norm": 10.561293601989746,
"learning_rate": 1.5982076176250936e-05,
"loss": 0.3069,
"step": 2120
},
{
"epoch": 2.22,
"grad_norm": 10.159906387329102,
"learning_rate": 1.5795369678864825e-05,
"loss": 0.5445,
"step": 2130
},
{
"epoch": 2.22,
"grad_norm": 3.5584466457366943,
"learning_rate": 1.5608663181478717e-05,
"loss": 0.3836,
"step": 2140
},
{
"epoch": 2.22,
"grad_norm": 15.946599960327148,
"learning_rate": 1.5421956684092606e-05,
"loss": 0.5615,
"step": 2150
},
{
"epoch": 2.23,
"grad_norm": 11.349198341369629,
"learning_rate": 1.5235250186706498e-05,
"loss": 0.6222,
"step": 2160
},
{
"epoch": 2.23,
"grad_norm": 11.590112686157227,
"learning_rate": 1.5048543689320387e-05,
"loss": 0.4596,
"step": 2170
},
{
"epoch": 2.23,
"grad_norm": 6.031866073608398,
"learning_rate": 1.486183719193428e-05,
"loss": 0.6662,
"step": 2180
},
{
"epoch": 2.24,
"grad_norm": 11.580230712890625,
"learning_rate": 1.467513069454817e-05,
"loss": 0.5664,
"step": 2190
},
{
"epoch": 2.24,
"grad_norm": 7.525058746337891,
"learning_rate": 1.4488424197162062e-05,
"loss": 0.5321,
"step": 2200
},
{
"epoch": 2.24,
"grad_norm": 10.779327392578125,
"learning_rate": 1.4301717699775951e-05,
"loss": 0.5125,
"step": 2210
},
{
"epoch": 2.25,
"grad_norm": 10.189163208007812,
"learning_rate": 1.4115011202389844e-05,
"loss": 0.3914,
"step": 2220
},
{
"epoch": 2.25,
"grad_norm": 10.56506633758545,
"learning_rate": 1.3928304705003734e-05,
"loss": 0.456,
"step": 2230
},
{
"epoch": 2.25,
"eval_accuracy": 0.7154362416107383,
"eval_loss": 0.7563889622688293,
"eval_runtime": 1697.7729,
"eval_samples_per_second": 0.439,
"eval_steps_per_second": 0.055,
"step": 2235
},
{
"epoch": 3.0,
"grad_norm": 6.929454326629639,
"learning_rate": 1.3741598207617627e-05,
"loss": 0.6988,
"step": 2240
},
{
"epoch": 3.01,
"grad_norm": 14.029314994812012,
"learning_rate": 1.3554891710231516e-05,
"loss": 0.5992,
"step": 2250
},
{
"epoch": 3.01,
"grad_norm": 5.4998626708984375,
"learning_rate": 1.3368185212845408e-05,
"loss": 0.2611,
"step": 2260
},
{
"epoch": 3.01,
"grad_norm": 8.145641326904297,
"learning_rate": 1.3181478715459297e-05,
"loss": 0.4163,
"step": 2270
},
{
"epoch": 3.02,
"grad_norm": 9.244057655334473,
"learning_rate": 1.2994772218073189e-05,
"loss": 0.4883,
"step": 2280
},
{
"epoch": 3.02,
"grad_norm": 5.202174186706543,
"learning_rate": 1.2808065720687081e-05,
"loss": 0.47,
"step": 2290
},
{
"epoch": 3.02,
"grad_norm": 9.33508586883545,
"learning_rate": 1.2621359223300972e-05,
"loss": 0.4289,
"step": 2300
},
{
"epoch": 3.03,
"grad_norm": 7.724455833435059,
"learning_rate": 1.2434652725914863e-05,
"loss": 0.4514,
"step": 2310
},
{
"epoch": 3.03,
"grad_norm": 4.528939723968506,
"learning_rate": 1.2247946228528753e-05,
"loss": 0.4015,
"step": 2320
},
{
"epoch": 3.03,
"grad_norm": 7.545526027679443,
"learning_rate": 1.2061239731142644e-05,
"loss": 0.5546,
"step": 2330
},
{
"epoch": 3.04,
"grad_norm": 12.792080879211426,
"learning_rate": 1.1874533233756534e-05,
"loss": 0.4298,
"step": 2340
},
{
"epoch": 3.04,
"grad_norm": 12.657350540161133,
"learning_rate": 1.1687826736370427e-05,
"loss": 0.4234,
"step": 2350
},
{
"epoch": 3.04,
"grad_norm": 10.286149024963379,
"learning_rate": 1.1501120238984317e-05,
"loss": 0.5744,
"step": 2360
},
{
"epoch": 3.05,
"grad_norm": 12.716216087341309,
"learning_rate": 1.1314413741598208e-05,
"loss": 0.447,
"step": 2370
},
{
"epoch": 3.05,
"grad_norm": 10.211912155151367,
"learning_rate": 1.1127707244212099e-05,
"loss": 0.3881,
"step": 2380
},
{
"epoch": 3.05,
"grad_norm": 9.413333892822266,
"learning_rate": 1.094100074682599e-05,
"loss": 0.5085,
"step": 2390
},
{
"epoch": 3.06,
"grad_norm": 5.344712734222412,
"learning_rate": 1.0754294249439881e-05,
"loss": 0.5566,
"step": 2400
},
{
"epoch": 3.06,
"grad_norm": 7.993694305419922,
"learning_rate": 1.0567587752053772e-05,
"loss": 0.53,
"step": 2410
},
{
"epoch": 3.06,
"grad_norm": 11.350872993469238,
"learning_rate": 1.0380881254667663e-05,
"loss": 0.3918,
"step": 2420
},
{
"epoch": 3.07,
"grad_norm": 13.153114318847656,
"learning_rate": 1.0194174757281553e-05,
"loss": 0.3662,
"step": 2430
},
{
"epoch": 3.07,
"grad_norm": 5.188636302947998,
"learning_rate": 1.0007468259895444e-05,
"loss": 0.3685,
"step": 2440
},
{
"epoch": 3.07,
"grad_norm": 10.718949317932129,
"learning_rate": 9.820761762509336e-06,
"loss": 0.3139,
"step": 2450
},
{
"epoch": 3.08,
"grad_norm": 5.5543131828308105,
"learning_rate": 9.634055265123227e-06,
"loss": 0.3762,
"step": 2460
},
{
"epoch": 3.08,
"grad_norm": 12.107147216796875,
"learning_rate": 9.447348767737117e-06,
"loss": 0.4458,
"step": 2470
},
{
"epoch": 3.08,
"grad_norm": 8.29355525970459,
"learning_rate": 9.260642270351008e-06,
"loss": 0.3784,
"step": 2480
},
{
"epoch": 3.09,
"grad_norm": 4.374119758605957,
"learning_rate": 9.073935772964899e-06,
"loss": 0.2344,
"step": 2490
},
{
"epoch": 3.09,
"grad_norm": 7.9884772300720215,
"learning_rate": 8.88722927557879e-06,
"loss": 0.2879,
"step": 2500
},
{
"epoch": 3.09,
"grad_norm": 5.281538009643555,
"learning_rate": 8.700522778192682e-06,
"loss": 0.4637,
"step": 2510
},
{
"epoch": 3.1,
"grad_norm": 11.596240997314453,
"learning_rate": 8.513816280806572e-06,
"loss": 0.3851,
"step": 2520
},
{
"epoch": 3.1,
"grad_norm": 4.9211859703063965,
"learning_rate": 8.327109783420463e-06,
"loss": 0.4068,
"step": 2530
},
{
"epoch": 3.1,
"grad_norm": 17.217477798461914,
"learning_rate": 8.140403286034353e-06,
"loss": 0.4238,
"step": 2540
},
{
"epoch": 3.11,
"grad_norm": 5.638582229614258,
"learning_rate": 7.953696788648244e-06,
"loss": 0.5215,
"step": 2550
},
{
"epoch": 3.11,
"grad_norm": 5.49488639831543,
"learning_rate": 7.766990291262136e-06,
"loss": 0.4626,
"step": 2560
},
{
"epoch": 3.11,
"grad_norm": 11.323406219482422,
"learning_rate": 7.580283793876028e-06,
"loss": 0.385,
"step": 2570
},
{
"epoch": 3.12,
"grad_norm": 7.352632522583008,
"learning_rate": 7.393577296489919e-06,
"loss": 0.4436,
"step": 2580
},
{
"epoch": 3.12,
"grad_norm": 9.734176635742188,
"learning_rate": 7.20687079910381e-06,
"loss": 0.3708,
"step": 2590
},
{
"epoch": 3.12,
"grad_norm": 7.2649149894714355,
"learning_rate": 7.0201643017177005e-06,
"loss": 0.3387,
"step": 2600
},
{
"epoch": 3.13,
"grad_norm": 6.474803447723389,
"learning_rate": 6.833457804331592e-06,
"loss": 0.4337,
"step": 2610
},
{
"epoch": 3.13,
"grad_norm": 7.163308143615723,
"learning_rate": 6.6467513069454825e-06,
"loss": 0.353,
"step": 2620
},
{
"epoch": 3.13,
"grad_norm": 1.612182378768921,
"learning_rate": 6.460044809559373e-06,
"loss": 0.3491,
"step": 2630
},
{
"epoch": 3.14,
"grad_norm": 10.161432266235352,
"learning_rate": 6.273338312173265e-06,
"loss": 0.3641,
"step": 2640
},
{
"epoch": 3.14,
"grad_norm": 10.15164566040039,
"learning_rate": 6.086631814787154e-06,
"loss": 0.5455,
"step": 2650
},
{
"epoch": 3.14,
"grad_norm": 7.675275802612305,
"learning_rate": 5.899925317401046e-06,
"loss": 0.3785,
"step": 2660
},
{
"epoch": 3.15,
"grad_norm": 9.649803161621094,
"learning_rate": 5.7132188200149364e-06,
"loss": 0.358,
"step": 2670
},
{
"epoch": 3.15,
"grad_norm": 6.838894844055176,
"learning_rate": 5.526512322628828e-06,
"loss": 0.3493,
"step": 2680
},
{
"epoch": 3.15,
"grad_norm": 8.323789596557617,
"learning_rate": 5.3398058252427185e-06,
"loss": 0.3981,
"step": 2690
},
{
"epoch": 3.16,
"grad_norm": 5.8700761795043945,
"learning_rate": 5.153099327856609e-06,
"loss": 0.2634,
"step": 2700
},
{
"epoch": 3.16,
"grad_norm": 13.782732009887695,
"learning_rate": 4.966392830470501e-06,
"loss": 0.5584,
"step": 2710
},
{
"epoch": 3.16,
"grad_norm": 10.914461135864258,
"learning_rate": 4.779686333084392e-06,
"loss": 0.455,
"step": 2720
},
{
"epoch": 3.17,
"grad_norm": 11.195931434631348,
"learning_rate": 4.592979835698283e-06,
"loss": 0.2783,
"step": 2730
},
{
"epoch": 3.17,
"grad_norm": 11.526093482971191,
"learning_rate": 4.406273338312174e-06,
"loss": 0.2719,
"step": 2740
},
{
"epoch": 3.17,
"grad_norm": 12.902427673339844,
"learning_rate": 4.219566840926065e-06,
"loss": 0.4308,
"step": 2750
},
{
"epoch": 3.18,
"grad_norm": 7.28366756439209,
"learning_rate": 4.032860343539955e-06,
"loss": 0.4471,
"step": 2760
},
{
"epoch": 3.18,
"grad_norm": 7.770831108093262,
"learning_rate": 3.846153846153847e-06,
"loss": 0.5488,
"step": 2770
},
{
"epoch": 3.18,
"grad_norm": 5.66066837310791,
"learning_rate": 3.6594473487677374e-06,
"loss": 0.4157,
"step": 2780
},
{
"epoch": 3.19,
"grad_norm": 4.850650787353516,
"learning_rate": 3.4727408513816284e-06,
"loss": 0.375,
"step": 2790
},
{
"epoch": 3.19,
"grad_norm": 7.98192024230957,
"learning_rate": 3.2860343539955195e-06,
"loss": 0.4436,
"step": 2800
},
{
"epoch": 3.19,
"grad_norm": 9.876733779907227,
"learning_rate": 3.09932785660941e-06,
"loss": 0.6538,
"step": 2810
},
{
"epoch": 3.2,
"grad_norm": 12.203571319580078,
"learning_rate": 2.912621359223301e-06,
"loss": 0.2297,
"step": 2820
},
{
"epoch": 3.2,
"grad_norm": 10.11854362487793,
"learning_rate": 2.725914861837192e-06,
"loss": 0.4069,
"step": 2830
},
{
"epoch": 3.2,
"grad_norm": 9.628511428833008,
"learning_rate": 2.539208364451083e-06,
"loss": 0.4468,
"step": 2840
},
{
"epoch": 3.21,
"grad_norm": 13.15705394744873,
"learning_rate": 2.352501867064974e-06,
"loss": 0.3533,
"step": 2850
},
{
"epoch": 3.21,
"grad_norm": 13.94404411315918,
"learning_rate": 2.165795369678865e-06,
"loss": 0.3798,
"step": 2860
},
{
"epoch": 3.21,
"grad_norm": 10.802663803100586,
"learning_rate": 1.979088872292756e-06,
"loss": 0.4173,
"step": 2870
},
{
"epoch": 3.22,
"grad_norm": 6.215898513793945,
"learning_rate": 1.7923823749066467e-06,
"loss": 0.3416,
"step": 2880
},
{
"epoch": 3.22,
"grad_norm": 8.513174057006836,
"learning_rate": 1.6056758775205377e-06,
"loss": 0.3657,
"step": 2890
},
{
"epoch": 3.22,
"grad_norm": 1.2327791452407837,
"learning_rate": 1.4189693801344288e-06,
"loss": 0.3455,
"step": 2900
},
{
"epoch": 3.23,
"grad_norm": 11.529572486877441,
"learning_rate": 1.2322628827483198e-06,
"loss": 0.2163,
"step": 2910
},
{
"epoch": 3.23,
"grad_norm": 3.785139799118042,
"learning_rate": 1.0455563853622106e-06,
"loss": 0.3368,
"step": 2920
},
{
"epoch": 3.23,
"grad_norm": 10.868538856506348,
"learning_rate": 8.588498879761017e-07,
"loss": 0.3235,
"step": 2930
},
{
"epoch": 3.24,
"grad_norm": 12.210295677185059,
"learning_rate": 6.721433905899926e-07,
"loss": 0.2774,
"step": 2940
},
{
"epoch": 3.24,
"grad_norm": 2.8030104637145996,
"learning_rate": 4.854368932038835e-07,
"loss": 0.2727,
"step": 2950
},
{
"epoch": 3.24,
"grad_norm": 13.06821060180664,
"learning_rate": 2.987303958177745e-07,
"loss": 0.3094,
"step": 2960
},
{
"epoch": 3.25,
"grad_norm": 6.2003912925720215,
"learning_rate": 1.1202389843166542e-07,
"loss": 0.3316,
"step": 2970
},
{
"epoch": 3.25,
"eval_accuracy": 0.72751677852349,
"eval_loss": 0.7167822122573853,
"eval_runtime": 1874.7039,
"eval_samples_per_second": 0.397,
"eval_steps_per_second": 0.05,
"step": 2976
},
{
"epoch": 3.25,
"step": 2976,
"total_flos": 6.0945535892058145e+19,
"train_loss": 0.7312911278618279,
"train_runtime": 121527.2742,
"train_samples_per_second": 0.196,
"train_steps_per_second": 0.024
},
{
"epoch": 3.25,
"eval_accuracy": 0.7163978494623656,
"eval_loss": 0.7914323806762695,
"eval_runtime": 1840.54,
"eval_samples_per_second": 0.404,
"eval_steps_per_second": 0.051,
"step": 2976
},
{
"epoch": 3.25,
"eval_accuracy": 0.7163978494623656,
"eval_loss": 0.7914324402809143,
"eval_runtime": 1837.684,
"eval_samples_per_second": 0.405,
"eval_steps_per_second": 0.051,
"step": 2976
}
],
"logging_steps": 10,
"max_steps": 2976,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 6.0945535892058145e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}