{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.15239256324291375,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001015950421619425,
"grad_norm": 17.625,
"learning_rate": 5e-06,
"loss": 3.4264,
"step": 10
},
{
"epoch": 0.00203190084323885,
"grad_norm": 12.5625,
"learning_rate": 1e-05,
"loss": 3.432,
"step": 20
},
{
"epoch": 0.003047851264858275,
"grad_norm": 14.0625,
"learning_rate": 1.5e-05,
"loss": 3.23,
"step": 30
},
{
"epoch": 0.0040638016864777,
"grad_norm": 12.4375,
"learning_rate": 2e-05,
"loss": 2.9762,
"step": 40
},
{
"epoch": 0.005079752108097125,
"grad_norm": 10.0625,
"learning_rate": 2.5e-05,
"loss": 2.6173,
"step": 50
},
{
"epoch": 0.00609570252971655,
"grad_norm": 10.1875,
"learning_rate": 3e-05,
"loss": 2.2004,
"step": 60
},
{
"epoch": 0.007111652951335975,
"grad_norm": 7.03125,
"learning_rate": 3.5e-05,
"loss": 1.4176,
"step": 70
},
{
"epoch": 0.0081276033729554,
"grad_norm": 4.375,
"learning_rate": 4e-05,
"loss": 1.0122,
"step": 80
},
{
"epoch": 0.009143553794574825,
"grad_norm": 6.5625,
"learning_rate": 4.5e-05,
"loss": 0.9116,
"step": 90
},
{
"epoch": 0.01015950421619425,
"grad_norm": 5.28125,
"learning_rate": 5e-05,
"loss": 0.6832,
"step": 100
},
{
"epoch": 0.011175454637813675,
"grad_norm": 5.5,
"learning_rate": 4.9999870035728426e-05,
"loss": 0.7355,
"step": 110
},
{
"epoch": 0.0121914050594331,
"grad_norm": 5.1875,
"learning_rate": 4.9999480144264944e-05,
"loss": 0.6673,
"step": 120
},
{
"epoch": 0.013207355481052525,
"grad_norm": 4.5,
"learning_rate": 4.9998830329663314e-05,
"loss": 0.6792,
"step": 130
},
{
"epoch": 0.01422330590267195,
"grad_norm": 3.9375,
"learning_rate": 4.9997920598679756e-05,
"loss": 0.6207,
"step": 140
},
{
"epoch": 0.015239256324291375,
"grad_norm": 3.15625,
"learning_rate": 4.999675096077286e-05,
"loss": 0.483,
"step": 150
},
{
"epoch": 0.0162552067459108,
"grad_norm": 5.28125,
"learning_rate": 4.999532142810354e-05,
"loss": 0.5319,
"step": 160
},
{
"epoch": 0.017271157167530225,
"grad_norm": 4.59375,
"learning_rate": 4.999363201553483e-05,
"loss": 0.6052,
"step": 170
},
{
"epoch": 0.01828710758914965,
"grad_norm": 5.03125,
"learning_rate": 4.9991682740631794e-05,
"loss": 0.4258,
"step": 180
},
{
"epoch": 0.019303058010769075,
"grad_norm": 3.859375,
"learning_rate": 4.998947362366133e-05,
"loss": 0.4309,
"step": 190
},
{
"epoch": 0.0203190084323885,
"grad_norm": 3.328125,
"learning_rate": 4.998700468759193e-05,
"loss": 0.3957,
"step": 200
},
{
"epoch": 0.021334958854007924,
"grad_norm": 4.9375,
"learning_rate": 4.9984275958093475e-05,
"loss": 0.4777,
"step": 210
},
{
"epoch": 0.02235090927562735,
"grad_norm": 4.78125,
"learning_rate": 4.998128746353695e-05,
"loss": 0.3549,
"step": 220
},
{
"epoch": 0.023366859697246774,
"grad_norm": 4.0625,
"learning_rate": 4.997803923499417e-05,
"loss": 0.4447,
"step": 230
},
{
"epoch": 0.0243828101188662,
"grad_norm": 6.375,
"learning_rate": 4.99745313062374e-05,
"loss": 0.3808,
"step": 240
},
{
"epoch": 0.025398760540485624,
"grad_norm": 3.59375,
"learning_rate": 4.99707637137391e-05,
"loss": 0.3827,
"step": 250
},
{
"epoch": 0.02641471096210505,
"grad_norm": 3.015625,
"learning_rate": 4.996673649667145e-05,
"loss": 0.3694,
"step": 260
},
{
"epoch": 0.027430661383724474,
"grad_norm": 2.296875,
"learning_rate": 4.9962449696906e-05,
"loss": 0.3586,
"step": 270
},
{
"epoch": 0.0284466118053439,
"grad_norm": 4.125,
"learning_rate": 4.9957903359013214e-05,
"loss": 0.3832,
"step": 280
},
{
"epoch": 0.029462562226963324,
"grad_norm": 3.296875,
"learning_rate": 4.995309753026201e-05,
"loss": 0.328,
"step": 290
},
{
"epoch": 0.03047851264858275,
"grad_norm": 4.5,
"learning_rate": 4.994803226061927e-05,
"loss": 0.3667,
"step": 300
},
{
"epoch": 0.03149446307020217,
"grad_norm": 4.3125,
"learning_rate": 4.994270760274933e-05,
"loss": 0.3811,
"step": 310
},
{
"epoch": 0.0325104134918216,
"grad_norm": 3.421875,
"learning_rate": 4.99371236120134e-05,
"loss": 0.3065,
"step": 320
},
{
"epoch": 0.03352636391344102,
"grad_norm": 4.6875,
"learning_rate": 4.993128034646902e-05,
"loss": 0.4177,
"step": 330
},
{
"epoch": 0.03454231433506045,
"grad_norm": 3.046875,
"learning_rate": 4.992517786686947e-05,
"loss": 0.33,
"step": 340
},
{
"epoch": 0.03555826475667987,
"grad_norm": 1.8828125,
"learning_rate": 4.9918816236663077e-05,
"loss": 0.3287,
"step": 350
},
{
"epoch": 0.0365742151782993,
"grad_norm": 3.8125,
"learning_rate": 4.991219552199262e-05,
"loss": 0.2934,
"step": 360
},
{
"epoch": 0.03759016559991872,
"grad_norm": 4.28125,
"learning_rate": 4.99053157916946e-05,
"loss": 0.3176,
"step": 370
},
{
"epoch": 0.03860611602153815,
"grad_norm": 2.609375,
"learning_rate": 4.989817711729856e-05,
"loss": 0.3318,
"step": 380
},
{
"epoch": 0.03962206644315757,
"grad_norm": 2.375,
"learning_rate": 4.98907795730263e-05,
"loss": 0.3234,
"step": 390
},
{
"epoch": 0.040638016864777,
"grad_norm": 4.46875,
"learning_rate": 4.988312323579114e-05,
"loss": 0.267,
"step": 400
},
{
"epoch": 0.04165396728639642,
"grad_norm": 3.75,
"learning_rate": 4.98752081851971e-05,
"loss": 0.3081,
"step": 410
},
{
"epoch": 0.04266991770801585,
"grad_norm": 2.203125,
"learning_rate": 4.986703450353809e-05,
"loss": 0.2917,
"step": 420
},
{
"epoch": 0.04368586812963527,
"grad_norm": 1.6015625,
"learning_rate": 4.985860227579703e-05,
"loss": 0.2805,
"step": 430
},
{
"epoch": 0.0447018185512547,
"grad_norm": 3.140625,
"learning_rate": 4.984991158964499e-05,
"loss": 0.3534,
"step": 440
},
{
"epoch": 0.04571776897287412,
"grad_norm": 3.296875,
"learning_rate": 4.9840962535440265e-05,
"loss": 0.335,
"step": 450
},
{
"epoch": 0.04673371939449355,
"grad_norm": 3.25,
"learning_rate": 4.983175520622744e-05,
"loss": 0.2544,
"step": 460
},
{
"epoch": 0.04774966981611297,
"grad_norm": 2.25,
"learning_rate": 4.982228969773642e-05,
"loss": 0.3449,
"step": 470
},
{
"epoch": 0.0487656202377324,
"grad_norm": 4.9375,
"learning_rate": 4.9812566108381435e-05,
"loss": 0.2964,
"step": 480
},
{
"epoch": 0.04978157065935182,
"grad_norm": 1.5703125,
"learning_rate": 4.9802584539260035e-05,
"loss": 0.2799,
"step": 490
},
{
"epoch": 0.05079752108097125,
"grad_norm": 2.828125,
"learning_rate": 4.979234509415199e-05,
"loss": 0.3231,
"step": 500
},
{
"epoch": 0.05181347150259067,
"grad_norm": 2.9375,
"learning_rate": 4.978184787951828e-05,
"loss": 0.2943,
"step": 510
},
{
"epoch": 0.0528294219242101,
"grad_norm": 2.34375,
"learning_rate": 4.977109300449992e-05,
"loss": 0.2705,
"step": 520
},
{
"epoch": 0.05384537234582952,
"grad_norm": 3.140625,
"learning_rate": 4.9760080580916876e-05,
"loss": 0.2998,
"step": 530
},
{
"epoch": 0.05486132276744895,
"grad_norm": 3.5625,
"learning_rate": 4.974881072326688e-05,
"loss": 0.2595,
"step": 540
},
{
"epoch": 0.05587727318906837,
"grad_norm": 4.25,
"learning_rate": 4.9737283548724236e-05,
"loss": 0.2803,
"step": 550
},
{
"epoch": 0.0568932236106878,
"grad_norm": 4.0625,
"learning_rate": 4.97254991771386e-05,
"loss": 0.3511,
"step": 560
},
{
"epoch": 0.05790917403230722,
"grad_norm": 2.515625,
"learning_rate": 4.971345773103377e-05,
"loss": 0.312,
"step": 570
},
{
"epoch": 0.05892512445392665,
"grad_norm": 3.21875,
"learning_rate": 4.9701159335606365e-05,
"loss": 0.2482,
"step": 580
},
{
"epoch": 0.05994107487554607,
"grad_norm": 5.5,
"learning_rate": 4.968860411872454e-05,
"loss": 0.2537,
"step": 590
},
{
"epoch": 0.0609570252971655,
"grad_norm": 3.546875,
"learning_rate": 4.967579221092666e-05,
"loss": 0.3125,
"step": 600
},
{
"epoch": 0.06197297571878492,
"grad_norm": 2.984375,
"learning_rate": 4.966272374541996e-05,
"loss": 0.2354,
"step": 610
},
{
"epoch": 0.06298892614040434,
"grad_norm": 3.6875,
"learning_rate": 4.964939885807912e-05,
"loss": 0.3213,
"step": 620
},
{
"epoch": 0.06400487656202378,
"grad_norm": 2.140625,
"learning_rate": 4.9635817687444876e-05,
"loss": 0.3003,
"step": 630
},
{
"epoch": 0.0650208269836432,
"grad_norm": 3.484375,
"learning_rate": 4.962198037472259e-05,
"loss": 0.2996,
"step": 640
},
{
"epoch": 0.06603677740526262,
"grad_norm": 3.21875,
"learning_rate": 4.9607887063780776e-05,
"loss": 0.2257,
"step": 650
},
{
"epoch": 0.06705272782688204,
"grad_norm": 5.375,
"learning_rate": 4.9593537901149564e-05,
"loss": 0.223,
"step": 660
},
{
"epoch": 0.06806867824850148,
"grad_norm": 4.1875,
"learning_rate": 4.957893303601924e-05,
"loss": 0.3407,
"step": 670
},
{
"epoch": 0.0690846286701209,
"grad_norm": 3.328125,
"learning_rate": 4.956407262023866e-05,
"loss": 0.2589,
"step": 680
},
{
"epoch": 0.07010057909174032,
"grad_norm": 2.953125,
"learning_rate": 4.954895680831367e-05,
"loss": 0.2949,
"step": 690
},
{
"epoch": 0.07111652951335974,
"grad_norm": 4.0625,
"learning_rate": 4.9533585757405506e-05,
"loss": 0.2995,
"step": 700
},
{
"epoch": 0.07213247993497918,
"grad_norm": 4.625,
"learning_rate": 4.951795962732917e-05,
"loss": 0.2894,
"step": 710
},
{
"epoch": 0.0731484303565986,
"grad_norm": 3.0,
"learning_rate": 4.9502078580551755e-05,
"loss": 0.3082,
"step": 720
},
{
"epoch": 0.07416438077821802,
"grad_norm": 3.65625,
"learning_rate": 4.9485942782190734e-05,
"loss": 0.2308,
"step": 730
},
{
"epoch": 0.07518033119983744,
"grad_norm": 4.78125,
"learning_rate": 4.9469552400012306e-05,
"loss": 0.2272,
"step": 740
},
{
"epoch": 0.07619628162145688,
"grad_norm": 4.25,
"learning_rate": 4.94529076044296e-05,
"loss": 0.2701,
"step": 750
},
{
"epoch": 0.0772122320430763,
"grad_norm": 3.140625,
"learning_rate": 4.94360085685009e-05,
"loss": 0.2686,
"step": 760
},
{
"epoch": 0.07822818246469572,
"grad_norm": 0.765625,
"learning_rate": 4.9418855467927894e-05,
"loss": 0.2051,
"step": 770
},
{
"epoch": 0.07924413288631514,
"grad_norm": 1.796875,
"learning_rate": 4.940144848105379e-05,
"loss": 0.2267,
"step": 780
},
{
"epoch": 0.08026008330793458,
"grad_norm": 4.5625,
"learning_rate": 4.93837877888615e-05,
"loss": 0.2597,
"step": 790
},
{
"epoch": 0.081276033729554,
"grad_norm": 3.03125,
"learning_rate": 4.9365873574971745e-05,
"loss": 0.3701,
"step": 800
},
{
"epoch": 0.08229198415117342,
"grad_norm": 4.5625,
"learning_rate": 4.9347706025641136e-05,
"loss": 0.2559,
"step": 810
},
{
"epoch": 0.08330793457279284,
"grad_norm": 3.90625,
"learning_rate": 4.9329285329760275e-05,
"loss": 0.2799,
"step": 820
},
{
"epoch": 0.08432388499441228,
"grad_norm": 3.140625,
"learning_rate": 4.9310611678851735e-05,
"loss": 0.2866,
"step": 830
},
{
"epoch": 0.0853398354160317,
"grad_norm": 2.46875,
"learning_rate": 4.929168526706811e-05,
"loss": 0.3105,
"step": 840
},
{
"epoch": 0.08635578583765112,
"grad_norm": 13.625,
"learning_rate": 4.927250629119e-05,
"loss": 0.2454,
"step": 850
},
{
"epoch": 0.08737173625927054,
"grad_norm": 3.921875,
"learning_rate": 4.9253074950623925e-05,
"loss": 0.2424,
"step": 860
},
{
"epoch": 0.08838768668088998,
"grad_norm": 2.90625,
"learning_rate": 4.9233391447400286e-05,
"loss": 0.2481,
"step": 870
},
{
"epoch": 0.0894036371025094,
"grad_norm": 2.96875,
"learning_rate": 4.921345598617125e-05,
"loss": 0.2231,
"step": 880
},
{
"epoch": 0.09041958752412882,
"grad_norm": 5.375,
"learning_rate": 4.9193268774208654e-05,
"loss": 0.3447,
"step": 890
},
{
"epoch": 0.09143553794574824,
"grad_norm": 2.0,
"learning_rate": 4.9172830021401785e-05,
"loss": 0.229,
"step": 900
},
{
"epoch": 0.09245148836736768,
"grad_norm": 3.1875,
"learning_rate": 4.9152139940255245e-05,
"loss": 0.2122,
"step": 910
},
{
"epoch": 0.0934674387889871,
"grad_norm": 3.40625,
"learning_rate": 4.913119874588677e-05,
"loss": 0.2386,
"step": 920
},
{
"epoch": 0.09448338921060652,
"grad_norm": 1.4609375,
"learning_rate": 4.911000665602489e-05,
"loss": 0.1944,
"step": 930
},
{
"epoch": 0.09549933963222594,
"grad_norm": 5.0625,
"learning_rate": 4.9088563891006786e-05,
"loss": 0.2038,
"step": 940
},
{
"epoch": 0.09651529005384538,
"grad_norm": 4.53125,
"learning_rate": 4.906687067377592e-05,
"loss": 0.3122,
"step": 950
},
{
"epoch": 0.0975312404754648,
"grad_norm": 2.84375,
"learning_rate": 4.904492722987976e-05,
"loss": 0.3157,
"step": 960
},
{
"epoch": 0.09854719089708422,
"grad_norm": 2.171875,
"learning_rate": 4.902273378746738e-05,
"loss": 0.3077,
"step": 970
},
{
"epoch": 0.09956314131870364,
"grad_norm": 2.84375,
"learning_rate": 4.9000290577287165e-05,
"loss": 0.2756,
"step": 980
},
{
"epoch": 0.10057909174032308,
"grad_norm": 0.99609375,
"learning_rate": 4.897759783268434e-05,
"loss": 0.2915,
"step": 990
},
{
"epoch": 0.1015950421619425,
"grad_norm": 3.53125,
"learning_rate": 4.895465578959859e-05,
"loss": 0.2052,
"step": 1000
},
{
"epoch": 0.10261099258356192,
"grad_norm": 4.0,
"learning_rate": 4.893146468656159e-05,
"loss": 0.2499,
"step": 1010
},
{
"epoch": 0.10362694300518134,
"grad_norm": 1.65625,
"learning_rate": 4.890802476469452e-05,
"loss": 0.278,
"step": 1020
},
{
"epoch": 0.10464289342680078,
"grad_norm": 3.625,
"learning_rate": 4.888433626770558e-05,
"loss": 0.2143,
"step": 1030
},
{
"epoch": 0.1056588438484202,
"grad_norm": 5.0625,
"learning_rate": 4.886039944188741e-05,
"loss": 0.2878,
"step": 1040
},
{
"epoch": 0.10667479427003962,
"grad_norm": 4.5,
"learning_rate": 4.883621453611461e-05,
"loss": 0.2744,
"step": 1050
},
{
"epoch": 0.10769074469165904,
"grad_norm": 4.5625,
"learning_rate": 4.881178180184106e-05,
"loss": 0.2734,
"step": 1060
},
{
"epoch": 0.10870669511327848,
"grad_norm": 3.125,
"learning_rate": 4.878710149309735e-05,
"loss": 0.3574,
"step": 1070
},
{
"epoch": 0.1097226455348979,
"grad_norm": 3.0625,
"learning_rate": 4.876217386648816e-05,
"loss": 0.2625,
"step": 1080
},
{
"epoch": 0.11073859595651732,
"grad_norm": 4.0625,
"learning_rate": 4.873699918118955e-05,
"loss": 0.2437,
"step": 1090
},
{
"epoch": 0.11175454637813674,
"grad_norm": 1.59375,
"learning_rate": 4.87115776989463e-05,
"loss": 0.2051,
"step": 1100
},
{
"epoch": 0.11277049679975618,
"grad_norm": 4.375,
"learning_rate": 4.8685909684069153e-05,
"loss": 0.1727,
"step": 1110
},
{
"epoch": 0.1137864472213756,
"grad_norm": 2.28125,
"learning_rate": 4.865999540343211e-05,
"loss": 0.2256,
"step": 1120
},
{
"epoch": 0.11480239764299502,
"grad_norm": 2.265625,
"learning_rate": 4.86338351264696e-05,
"loss": 0.3529,
"step": 1130
},
{
"epoch": 0.11581834806461444,
"grad_norm": 2.34375,
"learning_rate": 4.8607429125173754e-05,
"loss": 0.2113,
"step": 1140
},
{
"epoch": 0.11683429848623388,
"grad_norm": 0.7578125,
"learning_rate": 4.858077767409149e-05,
"loss": 0.2759,
"step": 1150
},
{
"epoch": 0.1178502489078533,
"grad_norm": 3.640625,
"learning_rate": 4.855388105032174e-05,
"loss": 0.2482,
"step": 1160
},
{
"epoch": 0.11886619932947272,
"grad_norm": 3.5,
"learning_rate": 4.852673953351249e-05,
"loss": 0.1865,
"step": 1170
},
{
"epoch": 0.11988214975109214,
"grad_norm": 3.75,
"learning_rate": 4.849935340585796e-05,
"loss": 0.2659,
"step": 1180
},
{
"epoch": 0.12089810017271158,
"grad_norm": 3.375,
"learning_rate": 4.8471722952095586e-05,
"loss": 0.1506,
"step": 1190
},
{
"epoch": 0.121914050594331,
"grad_norm": 3.34375,
"learning_rate": 4.844384845950312e-05,
"loss": 0.307,
"step": 1200
},
{
"epoch": 0.12293000101595042,
"grad_norm": 1.578125,
"learning_rate": 4.841573021789561e-05,
"loss": 0.1952,
"step": 1210
},
{
"epoch": 0.12394595143756984,
"grad_norm": 1.2890625,
"learning_rate": 4.838736851962239e-05,
"loss": 0.1779,
"step": 1220
},
{
"epoch": 0.12496190185918928,
"grad_norm": 1.265625,
"learning_rate": 4.835876365956408e-05,
"loss": 0.1235,
"step": 1230
},
{
"epoch": 0.12597785228080868,
"grad_norm": 1.9609375,
"learning_rate": 4.8329915935129436e-05,
"loss": 0.1876,
"step": 1240
},
{
"epoch": 0.12699380270242813,
"grad_norm": 1.6328125,
"learning_rate": 4.830082564625235e-05,
"loss": 0.2188,
"step": 1250
},
{
"epoch": 0.12800975312404755,
"grad_norm": 3.96875,
"learning_rate": 4.8271493095388684e-05,
"loss": 0.2622,
"step": 1260
},
{
"epoch": 0.12902570354566698,
"grad_norm": 3.765625,
"learning_rate": 4.824191858751312e-05,
"loss": 0.2724,
"step": 1270
},
{
"epoch": 0.1300416539672864,
"grad_norm": 5.59375,
"learning_rate": 4.821210243011601e-05,
"loss": 0.2413,
"step": 1280
},
{
"epoch": 0.13105760438890582,
"grad_norm": 3.34375,
"learning_rate": 4.818204493320016e-05,
"loss": 0.2618,
"step": 1290
},
{
"epoch": 0.13207355481052524,
"grad_norm": 2.78125,
"learning_rate": 4.8151746409277634e-05,
"loss": 0.2295,
"step": 1300
},
{
"epoch": 0.13308950523214466,
"grad_norm": 3.1875,
"learning_rate": 4.8121207173366484e-05,
"loss": 0.2733,
"step": 1310
},
{
"epoch": 0.13410545565376408,
"grad_norm": 2.28125,
"learning_rate": 4.809042754298746e-05,
"loss": 0.2311,
"step": 1320
},
{
"epoch": 0.13512140607538353,
"grad_norm": 2.171875,
"learning_rate": 4.805940783816075e-05,
"loss": 0.2059,
"step": 1330
},
{
"epoch": 0.13613735649700295,
"grad_norm": 2.796875,
"learning_rate": 4.8028148381402625e-05,
"loss": 0.2102,
"step": 1340
},
{
"epoch": 0.13715330691862238,
"grad_norm": 2.96875,
"learning_rate": 4.7996649497722084e-05,
"loss": 0.2708,
"step": 1350
},
{
"epoch": 0.1381692573402418,
"grad_norm": 2.4375,
"learning_rate": 4.7964911514617485e-05,
"loss": 0.2429,
"step": 1360
},
{
"epoch": 0.13918520776186122,
"grad_norm": 5.8125,
"learning_rate": 4.793293476207312e-05,
"loss": 0.2725,
"step": 1370
},
{
"epoch": 0.14020115818348064,
"grad_norm": 2.40625,
"learning_rate": 4.790071957255585e-05,
"loss": 0.2098,
"step": 1380
},
{
"epoch": 0.14121710860510006,
"grad_norm": 4.25,
"learning_rate": 4.786826628101154e-05,
"loss": 0.2101,
"step": 1390
},
{
"epoch": 0.14223305902671948,
"grad_norm": 2.578125,
"learning_rate": 4.783557522486167e-05,
"loss": 0.2624,
"step": 1400
},
{
"epoch": 0.14324900944833893,
"grad_norm": 3.125,
"learning_rate": 4.780264674399978e-05,
"loss": 0.2518,
"step": 1410
},
{
"epoch": 0.14426495986995835,
"grad_norm": 3.671875,
"learning_rate": 4.7769481180787966e-05,
"loss": 0.3112,
"step": 1420
},
{
"epoch": 0.14528091029157778,
"grad_norm": 3.984375,
"learning_rate": 4.773607888005327e-05,
"loss": 0.2747,
"step": 1430
},
{
"epoch": 0.1462968607131972,
"grad_norm": 3.234375,
"learning_rate": 4.770244018908416e-05,
"loss": 0.1572,
"step": 1440
},
{
"epoch": 0.14731281113481662,
"grad_norm": 4.09375,
"learning_rate": 4.766856545762687e-05,
"loss": 0.2148,
"step": 1450
},
{
"epoch": 0.14832876155643604,
"grad_norm": 1.6875,
"learning_rate": 4.763445503788178e-05,
"loss": 0.2531,
"step": 1460
},
{
"epoch": 0.14934471197805546,
"grad_norm": 2.375,
"learning_rate": 4.760010928449976e-05,
"loss": 0.199,
"step": 1470
},
{
"epoch": 0.15036066239967488,
"grad_norm": 4.6875,
"learning_rate": 4.7565528554578485e-05,
"loss": 0.2366,
"step": 1480
},
{
"epoch": 0.15137661282129433,
"grad_norm": 5.4375,
"learning_rate": 4.75307132076587e-05,
"loss": 0.1862,
"step": 1490
},
{
"epoch": 0.15239256324291375,
"grad_norm": 2.484375,
"learning_rate": 4.749566360572049e-05,
"loss": 0.2143,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 9843,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 7,
"trial_name": null,
"trial_params": null
}