{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9950900163666123,
"eval_steps": 500,
"global_step": 915,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 16.716502969210488,
"learning_rate": 5.4347826086956515e-09,
"loss": 2.043,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 15.483069924073853,
"learning_rate": 2.717391304347826e-08,
"loss": 2.0144,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 14.970955440826483,
"learning_rate": 5.434782608695652e-08,
"loss": 2.0252,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 16.409859521687437,
"learning_rate": 8.152173913043478e-08,
"loss": 2.0317,
"step": 15
},
{
"epoch": 0.07,
"grad_norm": 15.119202061638624,
"learning_rate": 1.0869565217391303e-07,
"loss": 2.0122,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 14.957507881730114,
"learning_rate": 1.358695652173913e-07,
"loss": 1.9709,
"step": 25
},
{
"epoch": 0.1,
"grad_norm": 13.274098507749727,
"learning_rate": 1.6304347826086955e-07,
"loss": 1.9688,
"step": 30
},
{
"epoch": 0.11,
"grad_norm": 14.11477451327087,
"learning_rate": 1.9021739130434784e-07,
"loss": 1.9494,
"step": 35
},
{
"epoch": 0.13,
"grad_norm": 8.88731632748161,
"learning_rate": 2.1739130434782607e-07,
"loss": 1.9168,
"step": 40
},
{
"epoch": 0.15,
"grad_norm": 7.911411675554511,
"learning_rate": 2.445652173913043e-07,
"loss": 1.8351,
"step": 45
},
{
"epoch": 0.16,
"grad_norm": 7.272959783180739,
"learning_rate": 2.717391304347826e-07,
"loss": 1.796,
"step": 50
},
{
"epoch": 0.18,
"grad_norm": 6.735025921958398,
"learning_rate": 2.9891304347826084e-07,
"loss": 1.756,
"step": 55
},
{
"epoch": 0.2,
"grad_norm": 4.788928779270596,
"learning_rate": 3.260869565217391e-07,
"loss": 1.7134,
"step": 60
},
{
"epoch": 0.21,
"grad_norm": 3.8456380127229584,
"learning_rate": 3.532608695652174e-07,
"loss": 1.654,
"step": 65
},
{
"epoch": 0.23,
"grad_norm": 3.513598430455612,
"learning_rate": 3.8043478260869567e-07,
"loss": 1.62,
"step": 70
},
{
"epoch": 0.25,
"grad_norm": 3.2709987769023225,
"learning_rate": 4.076086956521739e-07,
"loss": 1.6021,
"step": 75
},
{
"epoch": 0.26,
"grad_norm": 2.6615496338038342,
"learning_rate": 4.3478260869565214e-07,
"loss": 1.5568,
"step": 80
},
{
"epoch": 0.28,
"grad_norm": 2.164065581831568,
"learning_rate": 4.619565217391304e-07,
"loss": 1.528,
"step": 85
},
{
"epoch": 0.29,
"grad_norm": 2.0800819028364823,
"learning_rate": 4.891304347826087e-07,
"loss": 1.4875,
"step": 90
},
{
"epoch": 0.31,
"grad_norm": 2.011052602558943,
"learning_rate": 4.999836073996402e-07,
"loss": 1.5089,
"step": 95
},
{
"epoch": 0.33,
"grad_norm": 1.7314022698124276,
"learning_rate": 4.998834381823981e-07,
"loss": 1.4765,
"step": 100
},
{
"epoch": 0.34,
"grad_norm": 1.698172314244185,
"learning_rate": 4.99692243192814e-07,
"loss": 1.479,
"step": 105
},
{
"epoch": 0.36,
"grad_norm": 1.5924897184805729,
"learning_rate": 4.994100920780472e-07,
"loss": 1.4313,
"step": 110
},
{
"epoch": 0.38,
"grad_norm": 1.6198237756392246,
"learning_rate": 4.99037087618111e-07,
"loss": 1.405,
"step": 115
},
{
"epoch": 0.39,
"grad_norm": 1.6433214339592699,
"learning_rate": 4.985733656884334e-07,
"loss": 1.4268,
"step": 120
},
{
"epoch": 0.41,
"grad_norm": 1.585215905595777,
"learning_rate": 4.980190952103605e-07,
"loss": 1.4092,
"step": 125
},
{
"epoch": 0.43,
"grad_norm": 1.6305731412306417,
"learning_rate": 4.973744780896238e-07,
"loss": 1.3617,
"step": 130
},
{
"epoch": 0.44,
"grad_norm": 1.583823751699642,
"learning_rate": 4.966397491427908e-07,
"loss": 1.3597,
"step": 135
},
{
"epoch": 0.46,
"grad_norm": 1.484959829645435,
"learning_rate": 4.958151760117285e-07,
"loss": 1.3353,
"step": 140
},
{
"epoch": 0.47,
"grad_norm": 1.4342113311109135,
"learning_rate": 4.949010590661078e-07,
"loss": 1.3029,
"step": 145
},
{
"epoch": 0.49,
"grad_norm": 1.379625137958501,
"learning_rate": 4.938977312939872e-07,
"loss": 1.2749,
"step": 150
},
{
"epoch": 0.51,
"grad_norm": 1.2678627597286303,
"learning_rate": 4.928055581805149e-07,
"loss": 1.2539,
"step": 155
},
{
"epoch": 0.52,
"grad_norm": 1.1977112273152672,
"learning_rate": 4.916249375747916e-07,
"loss": 1.2506,
"step": 160
},
{
"epoch": 0.54,
"grad_norm": 1.2424603760621953,
"learning_rate": 4.903562995449452e-07,
"loss": 1.2563,
"step": 165
},
{
"epoch": 0.56,
"grad_norm": 1.1271206534398783,
"learning_rate": 4.890001062214688e-07,
"loss": 1.2476,
"step": 170
},
{
"epoch": 0.57,
"grad_norm": 1.1518448899554141,
"learning_rate": 4.875568516288789e-07,
"loss": 1.2349,
"step": 175
},
{
"epoch": 0.59,
"grad_norm": 1.1817144244682463,
"learning_rate": 4.860270615057555e-07,
"loss": 1.2165,
"step": 180
},
{
"epoch": 0.61,
"grad_norm": 1.1263459506930984,
"learning_rate": 4.844112931132302e-07,
"loss": 1.2187,
"step": 185
},
{
"epoch": 0.62,
"grad_norm": 1.0547482535639097,
"learning_rate": 4.827101350319902e-07,
"loss": 1.2125,
"step": 190
},
{
"epoch": 0.64,
"grad_norm": 1.1000431029318167,
"learning_rate": 4.809242069478755e-07,
"loss": 1.2012,
"step": 195
},
{
"epoch": 0.65,
"grad_norm": 1.1158408603050054,
"learning_rate": 4.790541594261431e-07,
"loss": 1.2363,
"step": 200
},
{
"epoch": 0.67,
"grad_norm": 1.1038133372547887,
"learning_rate": 4.771006736744841e-07,
"loss": 1.2126,
"step": 205
},
{
"epoch": 0.69,
"grad_norm": 1.0325798408972995,
"learning_rate": 4.7506446129487827e-07,
"loss": 1.1925,
"step": 210
},
{
"epoch": 0.7,
"grad_norm": 1.0244158176654672,
"learning_rate": 4.7294626402437656e-07,
"loss": 1.2129,
"step": 215
},
{
"epoch": 0.72,
"grad_norm": 1.004472026312722,
"learning_rate": 4.707468534649062e-07,
"loss": 1.1808,
"step": 220
},
{
"epoch": 0.74,
"grad_norm": 1.0183080167380476,
"learning_rate": 4.684670308021974e-07,
"loss": 1.1993,
"step": 225
},
{
"epoch": 0.75,
"grad_norm": 0.9917433442773201,
"learning_rate": 4.6610762651393285e-07,
"loss": 1.2014,
"step": 230
},
{
"epoch": 0.77,
"grad_norm": 0.9652304321103495,
"learning_rate": 4.6366950006722716e-07,
"loss": 1.1899,
"step": 235
},
{
"epoch": 0.79,
"grad_norm": 1.0030899254957348,
"learning_rate": 4.611535396055463e-07,
"loss": 1.1807,
"step": 240
},
{
"epoch": 0.8,
"grad_norm": 0.9792294017825395,
"learning_rate": 4.58560661625181e-07,
"loss": 1.1865,
"step": 245
},
{
"epoch": 0.82,
"grad_norm": 1.0234204375359932,
"learning_rate": 4.5589181064139187e-07,
"loss": 1.1911,
"step": 250
},
{
"epoch": 0.83,
"grad_norm": 1.001857550343514,
"learning_rate": 4.531479588443483e-07,
"loss": 1.1784,
"step": 255
},
{
"epoch": 0.85,
"grad_norm": 1.0265204712627047,
"learning_rate": 4.503301057449855e-07,
"loss": 1.1776,
"step": 260
},
{
"epoch": 0.87,
"grad_norm": 0.9768188781212096,
"learning_rate": 4.474392778109101e-07,
"loss": 1.205,
"step": 265
},
{
"epoch": 0.88,
"grad_norm": 0.9711825909852003,
"learning_rate": 4.444765280924855e-07,
"loss": 1.1848,
"step": 270
},
{
"epoch": 0.9,
"grad_norm": 1.0406571418053925,
"learning_rate": 4.4144293583923455e-07,
"loss": 1.172,
"step": 275
},
{
"epoch": 0.92,
"grad_norm": 0.9599694708712767,
"learning_rate": 4.383396061066974e-07,
"loss": 1.1803,
"step": 280
},
{
"epoch": 0.93,
"grad_norm": 0.9804601001635962,
"learning_rate": 4.351676693538906e-07,
"loss": 1.1799,
"step": 285
},
{
"epoch": 0.95,
"grad_norm": 0.9277200983575595,
"learning_rate": 4.319282810315109e-07,
"loss": 1.1757,
"step": 290
},
{
"epoch": 0.97,
"grad_norm": 0.9052931929609641,
"learning_rate": 4.286226211610358e-07,
"loss": 1.1647,
"step": 295
},
{
"epoch": 0.98,
"grad_norm": 0.9475606426644578,
"learning_rate": 4.2525189390487403e-07,
"loss": 1.1868,
"step": 300
},
{
"epoch": 1.0,
"grad_norm": 0.9480095438475787,
"learning_rate": 4.2181732712772155e-07,
"loss": 1.1777,
"step": 305
},
{
"epoch": 1.0,
"eval_loss": 1.1991753578186035,
"eval_runtime": 552.5832,
"eval_samples_per_second": 0.385,
"eval_steps_per_second": 0.098,
"step": 305
},
{
"epoch": 1.01,
"grad_norm": 0.9649853251126954,
"learning_rate": 4.183201719492837e-07,
"loss": 1.1908,
"step": 310
},
{
"epoch": 1.03,
"grad_norm": 0.9307973686987313,
"learning_rate": 4.147617022885261e-07,
"loss": 1.1392,
"step": 315
},
{
"epoch": 1.05,
"grad_norm": 0.9374655856787941,
"learning_rate": 4.1114321439962085e-07,
"loss": 1.1333,
"step": 320
},
{
"epoch": 1.06,
"grad_norm": 0.9496571637434005,
"learning_rate": 4.0746602639975547e-07,
"loss": 1.1656,
"step": 325
},
{
"epoch": 1.08,
"grad_norm": 0.9262061183605496,
"learning_rate": 4.037314777889792e-07,
"loss": 1.1466,
"step": 330
},
{
"epoch": 1.1,
"grad_norm": 0.921853250947036,
"learning_rate": 3.999409289622591e-07,
"loss": 1.1178,
"step": 335
},
{
"epoch": 1.11,
"grad_norm": 0.921849981902405,
"learning_rate": 3.9609576071392493e-07,
"loss": 1.1624,
"step": 340
},
{
"epoch": 1.13,
"grad_norm": 0.9093850424200817,
"learning_rate": 3.921973737346832e-07,
"loss": 1.1383,
"step": 345
},
{
"epoch": 1.15,
"grad_norm": 0.9486739204504845,
"learning_rate": 3.8824718810138367e-07,
"loss": 1.1391,
"step": 350
},
{
"epoch": 1.16,
"grad_norm": 0.9765974188105699,
"learning_rate": 3.8424664275972345e-07,
"loss": 1.1773,
"step": 355
},
{
"epoch": 1.18,
"grad_norm": 0.9573534898626346,
"learning_rate": 3.801971950000783e-07,
"loss": 1.1564,
"step": 360
},
{
"epoch": 1.19,
"grad_norm": 0.91608560372732,
"learning_rate": 3.7610031992665106e-07,
"loss": 1.1595,
"step": 365
},
{
"epoch": 1.21,
"grad_norm": 0.922365401205156,
"learning_rate": 3.719575099201309e-07,
"loss": 1.1458,
"step": 370
},
{
"epoch": 1.23,
"grad_norm": 0.8960246173391375,
"learning_rate": 3.677702740940603e-07,
"loss": 1.1707,
"step": 375
},
{
"epoch": 1.24,
"grad_norm": 0.9375786732342487,
"learning_rate": 3.6354013774510455e-07,
"loss": 1.1816,
"step": 380
},
{
"epoch": 1.26,
"grad_norm": 0.9357396217542764,
"learning_rate": 3.5926864179742864e-07,
"loss": 1.1486,
"step": 385
},
{
"epoch": 1.28,
"grad_norm": 0.9231104901219429,
"learning_rate": 3.549573422413795e-07,
"loss": 1.1499,
"step": 390
},
{
"epoch": 1.29,
"grad_norm": 0.871929206362279,
"learning_rate": 3.506078095666812e-07,
"loss": 1.1392,
"step": 395
},
{
"epoch": 1.31,
"grad_norm": 0.9066352806550632,
"learning_rate": 3.4622162819034807e-07,
"loss": 1.1309,
"step": 400
},
{
"epoch": 1.33,
"grad_norm": 0.9173513698176368,
"learning_rate": 3.418003958795243e-07,
"loss": 1.1397,
"step": 405
},
{
"epoch": 1.34,
"grad_norm": 0.9113976210277046,
"learning_rate": 3.3734572316946073e-07,
"loss": 1.1575,
"step": 410
},
{
"epoch": 1.36,
"grad_norm": 0.9178151730113013,
"learning_rate": 3.328592327768405e-07,
"loss": 1.1377,
"step": 415
},
{
"epoch": 1.37,
"grad_norm": 0.9007526310368873,
"learning_rate": 3.2834255900866716e-07,
"loss": 1.1319,
"step": 420
},
{
"epoch": 1.39,
"grad_norm": 0.9174055759076049,
"learning_rate": 3.2379734716693075e-07,
"loss": 1.1325,
"step": 425
},
{
"epoch": 1.41,
"grad_norm": 0.8949717617571301,
"learning_rate": 3.1922525294926936e-07,
"loss": 1.1248,
"step": 430
},
{
"epoch": 1.42,
"grad_norm": 0.9421266219905364,
"learning_rate": 3.146279418458428e-07,
"loss": 1.1356,
"step": 435
},
{
"epoch": 1.44,
"grad_norm": 0.939956376079189,
"learning_rate": 3.100070885326395e-07,
"loss": 1.1333,
"step": 440
},
{
"epoch": 1.46,
"grad_norm": 0.8908860472024647,
"learning_rate": 3.053643762614381e-07,
"loss": 1.1244,
"step": 445
},
{
"epoch": 1.47,
"grad_norm": 0.854957997046981,
"learning_rate": 3.0070149624664367e-07,
"loss": 1.116,
"step": 450
},
{
"epoch": 1.49,
"grad_norm": 0.9824002406797512,
"learning_rate": 2.9602014704922413e-07,
"loss": 1.1351,
"step": 455
},
{
"epoch": 1.51,
"grad_norm": 0.9473931201675668,
"learning_rate": 2.913220339579712e-07,
"loss": 1.1486,
"step": 460
},
{
"epoch": 1.52,
"grad_norm": 0.9614723238274686,
"learning_rate": 2.866088683683088e-07,
"loss": 1.1278,
"step": 465
},
{
"epoch": 1.54,
"grad_norm": 0.8966403120315597,
"learning_rate": 2.8188236715887897e-07,
"loss": 1.1502,
"step": 470
},
{
"epoch": 1.55,
"grad_norm": 0.9113074954319733,
"learning_rate": 2.7714425206612884e-07,
"loss": 1.1477,
"step": 475
},
{
"epoch": 1.57,
"grad_norm": 0.9277072403986325,
"learning_rate": 2.7239624905712885e-07,
"loss": 1.1326,
"step": 480
},
{
"epoch": 1.59,
"grad_norm": 0.8933150404518155,
"learning_rate": 2.6764008770084986e-07,
"loss": 1.1122,
"step": 485
},
{
"epoch": 1.6,
"grad_norm": 0.9423200783026597,
"learning_rate": 2.6287750053812746e-07,
"loss": 1.1319,
"step": 490
},
{
"epoch": 1.62,
"grad_norm": 0.9138649347681606,
"learning_rate": 2.581102224505449e-07,
"loss": 1.0938,
"step": 495
},
{
"epoch": 1.64,
"grad_norm": 0.9218456828673846,
"learning_rate": 2.5333999002846235e-07,
"loss": 1.124,
"step": 500
},
{
"epoch": 1.65,
"grad_norm": 0.8863039794287404,
"learning_rate": 2.4856854093842373e-07,
"loss": 1.1149,
"step": 505
},
{
"epoch": 1.67,
"grad_norm": 0.8502000094018759,
"learning_rate": 2.4379761329017255e-07,
"loss": 1.1067,
"step": 510
},
{
"epoch": 1.69,
"grad_norm": 0.9252138466436688,
"learning_rate": 2.3902894500350516e-07,
"loss": 1.1103,
"step": 515
},
{
"epoch": 1.7,
"grad_norm": 0.8961237196153136,
"learning_rate": 2.342642731751933e-07,
"loss": 1.0985,
"step": 520
},
{
"epoch": 1.72,
"grad_norm": 0.869866335392851,
"learning_rate": 2.2950533344620754e-07,
"loss": 1.1104,
"step": 525
},
{
"epoch": 1.73,
"grad_norm": 0.9315973913649565,
"learning_rate": 2.247538593694695e-07,
"loss": 1.0717,
"step": 530
},
{
"epoch": 1.75,
"grad_norm": 0.8902726821310524,
"learning_rate": 2.2001158177836596e-07,
"loss": 1.129,
"step": 535
},
{
"epoch": 1.77,
"grad_norm": 0.9195027929003787,
"learning_rate": 2.1528022815625303e-07,
"loss": 1.1016,
"step": 540
},
{
"epoch": 1.78,
"grad_norm": 0.8626744756177651,
"learning_rate": 2.1056152200718018e-07,
"loss": 1.118,
"step": 545
},
{
"epoch": 1.8,
"grad_norm": 0.8633291274404195,
"learning_rate": 2.0585718222806528e-07,
"loss": 1.0894,
"step": 550
},
{
"epoch": 1.82,
"grad_norm": 0.9198499710986737,
"learning_rate": 2.0116892248254639e-07,
"loss": 1.1263,
"step": 555
},
{
"epoch": 1.83,
"grad_norm": 0.8725009093818972,
"learning_rate": 1.964984505767405e-07,
"loss": 1.0881,
"step": 560
},
{
"epoch": 1.85,
"grad_norm": 0.8481882368797279,
"learning_rate": 1.91847467837136e-07,
"loss": 1.0855,
"step": 565
},
{
"epoch": 1.87,
"grad_norm": 0.9323691915343033,
"learning_rate": 1.8721766849084592e-07,
"loss": 1.1097,
"step": 570
},
{
"epoch": 1.88,
"grad_norm": 0.9005669510238986,
"learning_rate": 1.8261073904844626e-07,
"loss": 1.1016,
"step": 575
},
{
"epoch": 1.9,
"grad_norm": 0.9505847371717607,
"learning_rate": 1.7802835768962628e-07,
"loss": 1.0998,
"step": 580
},
{
"epoch": 1.91,
"grad_norm": 0.9020674746183535,
"learning_rate": 1.7347219365187282e-07,
"loss": 1.1284,
"step": 585
},
{
"epoch": 1.93,
"grad_norm": 0.8913471215755544,
"learning_rate": 1.689439066224122e-07,
"loss": 1.1095,
"step": 590
},
{
"epoch": 1.95,
"grad_norm": 0.9156453302935215,
"learning_rate": 1.6444514613363143e-07,
"loss": 1.1152,
"step": 595
},
{
"epoch": 1.96,
"grad_norm": 0.8754536183127603,
"learning_rate": 1.5997755096219792e-07,
"loss": 1.0841,
"step": 600
},
{
"epoch": 1.98,
"grad_norm": 0.8389293727280541,
"learning_rate": 1.5554274853209775e-07,
"loss": 1.1028,
"step": 605
},
{
"epoch": 2.0,
"grad_norm": 0.91364223904081,
"learning_rate": 1.5114235432180879e-07,
"loss": 1.1046,
"step": 610
},
{
"epoch": 2.0,
"eval_loss": 1.1457586288452148,
"eval_runtime": 552.4602,
"eval_samples_per_second": 0.386,
"eval_steps_per_second": 0.098,
"step": 611
},
{
"epoch": 2.01,
"grad_norm": 0.8685324682917593,
"learning_rate": 1.4677797127582592e-07,
"loss": 1.1065,
"step": 615
},
{
"epoch": 2.03,
"grad_norm": 0.881396936654864,
"learning_rate": 1.4245118922075167e-07,
"loss": 1.1062,
"step": 620
},
{
"epoch": 2.05,
"grad_norm": 0.8605768115061145,
"learning_rate": 1.3816358428616555e-07,
"loss": 1.1139,
"step": 625
},
{
"epoch": 2.06,
"grad_norm": 0.8695988343532115,
"learning_rate": 1.3391671833048222e-07,
"loss": 1.1026,
"step": 630
},
{
"epoch": 2.08,
"grad_norm": 0.9277463151512839,
"learning_rate": 1.2971213837200966e-07,
"loss": 1.1171,
"step": 635
},
{
"epoch": 2.09,
"grad_norm": 0.8936560541708038,
"learning_rate": 1.255513760254111e-07,
"loss": 1.1141,
"step": 640
},
{
"epoch": 2.11,
"grad_norm": 0.8783043744127691,
"learning_rate": 1.2143594694378035e-07,
"loss": 1.0913,
"step": 645
},
{
"epoch": 2.13,
"grad_norm": 0.8926718750269779,
"learning_rate": 1.1736735026652975e-07,
"loss": 1.0963,
"step": 650
},
{
"epoch": 2.14,
"grad_norm": 0.8891415780653408,
"learning_rate": 1.133470680732943e-07,
"loss": 1.1072,
"step": 655
},
{
"epoch": 2.16,
"grad_norm": 0.9510568294433623,
"learning_rate": 1.0937656484405083e-07,
"loss": 1.0701,
"step": 660
},
{
"epoch": 2.18,
"grad_norm": 0.8861055353381297,
"learning_rate": 1.0545728692564632e-07,
"loss": 1.1009,
"step": 665
},
{
"epoch": 2.19,
"grad_norm": 0.9135875600693542,
"learning_rate": 1.015906620049343e-07,
"loss": 1.0917,
"step": 670
},
{
"epoch": 2.21,
"grad_norm": 0.8715772657210394,
"learning_rate": 9.777809858870623e-08,
"loss": 1.0873,
"step": 675
},
{
"epoch": 2.23,
"grad_norm": 0.8496453955301547,
"learning_rate": 9.402098549061083e-08,
"loss": 1.0874,
"step": 680
},
{
"epoch": 2.24,
"grad_norm": 0.8627541455935185,
"learning_rate": 9.032069132524728e-08,
"loss": 1.0828,
"step": 685
},
{
"epoch": 2.26,
"grad_norm": 0.8953058627119301,
"learning_rate": 8.667856400961507e-08,
"loss": 1.0636,
"step": 690
},
{
"epoch": 2.27,
"grad_norm": 0.9074456406630441,
"learning_rate": 8.309593027210543e-08,
"loss": 1.0984,
"step": 695
},
{
"epoch": 2.29,
"grad_norm": 0.885202959051119,
"learning_rate": 7.957409516920921e-08,
"loss": 1.0898,
"step": 700
},
{
"epoch": 2.31,
"grad_norm": 0.8664959542867797,
"learning_rate": 7.611434161012045e-08,
"loss": 1.1019,
"step": 705
},
{
"epoch": 2.32,
"grad_norm": 0.8687463126064324,
"learning_rate": 7.271792988940755e-08,
"loss": 1.0725,
"step": 710
},
{
"epoch": 2.34,
"grad_norm": 0.9668961175895807,
"learning_rate": 6.93860972279214e-08,
"loss": 1.1063,
"step": 715
},
{
"epoch": 2.36,
"grad_norm": 0.9709686806435888,
"learning_rate": 6.612005732211015e-08,
"loss": 1.1057,
"step": 720
},
{
"epoch": 2.37,
"grad_norm": 0.8789480605794918,
"learning_rate": 6.292099990190178e-08,
"loss": 1.1014,
"step": 725
},
{
"epoch": 2.39,
"grad_norm": 0.8557651308313651,
"learning_rate": 5.979009029731789e-08,
"loss": 1.099,
"step": 730
},
{
"epoch": 2.41,
"grad_norm": 0.821933575062032,
"learning_rate": 5.6728469013975876e-08,
"loss": 1.0876,
"step": 735
},
{
"epoch": 2.42,
"grad_norm": 0.9104274652488417,
"learning_rate": 5.373725131763287e-08,
"loss": 1.0894,
"step": 740
},
{
"epoch": 2.44,
"grad_norm": 0.881443701968621,
"learning_rate": 5.081752682792542e-08,
"loss": 1.087,
"step": 745
},
{
"epoch": 2.45,
"grad_norm": 0.9159497602891767,
"learning_rate": 4.7970359121449924e-08,
"loss": 1.121,
"step": 750
},
{
"epoch": 2.47,
"grad_norm": 0.8506596006190186,
"learning_rate": 4.519678534433088e-08,
"loss": 1.108,
"step": 755
},
{
"epoch": 2.49,
"grad_norm": 0.8568944137130331,
"learning_rate": 4.249781583441644e-08,
"loss": 1.0897,
"step": 760
},
{
"epoch": 2.5,
"grad_norm": 0.8910416600450194,
"learning_rate": 3.9874433753239427e-08,
"loss": 1.1024,
"step": 765
},
{
"epoch": 2.52,
"grad_norm": 0.904643756401202,
"learning_rate": 3.732759472787894e-08,
"loss": 1.1059,
"step": 770
},
{
"epoch": 2.54,
"grad_norm": 0.8802351411972301,
"learning_rate": 3.485822650285042e-08,
"loss": 1.1208,
"step": 775
},
{
"epoch": 2.55,
"grad_norm": 0.952490522578208,
"learning_rate": 3.246722860215414e-08,
"loss": 1.1187,
"step": 780
},
{
"epoch": 2.57,
"grad_norm": 0.8914026126857395,
"learning_rate": 3.0155472001602486e-08,
"loss": 1.1034,
"step": 785
},
{
"epoch": 2.59,
"grad_norm": 0.8960856771418554,
"learning_rate": 2.7923798811547105e-08,
"loss": 1.106,
"step": 790
},
{
"epoch": 2.6,
"grad_norm": 0.8524261428700936,
"learning_rate": 2.5773021970121266e-08,
"loss": 1.1115,
"step": 795
},
{
"epoch": 2.62,
"grad_norm": 0.8753031757673515,
"learning_rate": 2.3703924947108044e-08,
"loss": 1.1139,
"step": 800
},
{
"epoch": 2.64,
"grad_norm": 0.889260017399631,
"learning_rate": 2.1717261458543912e-08,
"loss": 1.1165,
"step": 805
},
{
"epoch": 2.65,
"grad_norm": 0.8951574525353929,
"learning_rate": 1.9813755192159803e-08,
"loss": 1.0916,
"step": 810
},
{
"epoch": 2.67,
"grad_norm": 0.8577854322715776,
"learning_rate": 1.7994099543761993e-08,
"loss": 1.0743,
"step": 815
},
{
"epoch": 2.68,
"grad_norm": 0.8772006513774372,
"learning_rate": 1.625895736464622e-08,
"loss": 1.125,
"step": 820
},
{
"epoch": 2.7,
"grad_norm": 0.8752419885342775,
"learning_rate": 1.4608960720139063e-08,
"loss": 1.1034,
"step": 825
},
{
"epoch": 2.72,
"grad_norm": 0.8573911472670636,
"learning_rate": 1.304471065935389e-08,
"loss": 1.0972,
"step": 830
},
{
"epoch": 2.73,
"grad_norm": 0.8899464128181873,
"learning_rate": 1.156677699624442e-08,
"loss": 1.08,
"step": 835
},
{
"epoch": 2.75,
"grad_norm": 0.900376288519936,
"learning_rate": 1.0175698102037473e-08,
"loss": 1.0898,
"step": 840
},
{
"epoch": 2.77,
"grad_norm": 0.8624497890591514,
"learning_rate": 8.87198070911857e-09,
"loss": 1.0796,
"step": 845
},
{
"epoch": 2.78,
"grad_norm": 0.8848072665534406,
"learning_rate": 7.656099726443071e-09,
"loss": 1.1282,
"step": 850
},
{
"epoch": 2.8,
"grad_norm": 0.9099776946050603,
"learning_rate": 6.528498066539962e-09,
"loss": 1.1028,
"step": 855
},
{
"epoch": 2.82,
"grad_norm": 0.8819666433395044,
"learning_rate": 5.4895864841703335e-09,
"loss": 1.1041,
"step": 860
},
{
"epoch": 2.83,
"grad_norm": 0.864829653067709,
"learning_rate": 4.539743426701048e-09,
"loss": 1.0925,
"step": 865
},
{
"epoch": 2.85,
"grad_norm": 0.9179363157687722,
"learning_rate": 3.6793148962462607e-09,
"loss": 1.0937,
"step": 870
},
{
"epoch": 2.86,
"grad_norm": 0.8348352435305063,
"learning_rate": 2.9086143236283225e-09,
"loss": 1.0668,
"step": 875
},
{
"epoch": 2.88,
"grad_norm": 0.873330764161282,
"learning_rate": 2.227922454203529e-09,
"loss": 1.0824,
"step": 880
},
{
"epoch": 2.9,
"grad_norm": 0.8710437274036247,
"learning_rate": 1.6374872455939547e-09,
"loss": 1.0772,
"step": 885
},
{
"epoch": 2.91,
"grad_norm": 0.8778769609639595,
"learning_rate": 1.1375237773633973e-09,
"loss": 1.0961,
"step": 890
},
{
"epoch": 2.93,
"grad_norm": 0.8825077843421442,
"learning_rate": 7.282141726696334e-10,
"loss": 1.1155,
"step": 895
},
{
"epoch": 2.95,
"grad_norm": 0.8557847842209891,
"learning_rate": 4.0970753192190456e-10,
"loss": 1.0864,
"step": 900
},
{
"epoch": 2.96,
"grad_norm": 0.8808858637432918,
"learning_rate": 1.8211987846766807e-10,
"loss": 1.0782,
"step": 905
},
{
"epoch": 2.98,
"grad_norm": 0.8609393480980289,
"learning_rate": 4.553411632840509e-11,
"loss": 1.0913,
"step": 910
},
{
"epoch": 3.0,
"grad_norm": 0.8627091171927345,
"learning_rate": 0.0,
"loss": 1.1129,
"step": 915
},
{
"epoch": 3.0,
"eval_loss": 1.1385215520858765,
"eval_runtime": 551.1914,
"eval_samples_per_second": 0.386,
"eval_steps_per_second": 0.098,
"step": 915
},
{
"epoch": 3.0,
"step": 915,
"total_flos": 149131666391040.0,
"train_loss": 1.2146218933042934,
"train_runtime": 75011.8955,
"train_samples_per_second": 0.098,
"train_steps_per_second": 0.012
}
],
"logging_steps": 5,
"max_steps": 915,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 458,
"total_flos": 149131666391040.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}