groupbert-base-uncased / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.035964035964036,
"global_step": 2038,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0012782749814122844,
"loss": 1.6279,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.0018077538151554684,
"loss": 1.2509,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 0.0022140372138502386,
"loss": 1.4568,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 0.0025565499628245687,
"loss": 1.4557,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 0.0028583097523751473,
"loss": 1.1046,
"step": 25
},
{
"epoch": 0.03,
"learning_rate": 0.0031311214554257476,
"loss": 1.3079,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 0.003381997707972616,
"loss": 1.1913,
"step": 35
},
{
"epoch": 0.04,
"learning_rate": 0.0036155076303109367,
"loss": 1.415,
"step": 40
},
{
"epoch": 0.04,
"learning_rate": 0.003834824944236853,
"loss": 1.2117,
"step": 45
},
{
"epoch": 0.05,
"learning_rate": 0.0040422604172722166,
"loss": 1.3286,
"step": 50
},
{
"epoch": 0.05,
"learning_rate": 0.004239558492243069,
"loss": 1.2645,
"step": 55
},
{
"epoch": 0.06,
"learning_rate": 0.004428074427700477,
"loss": 1.3262,
"step": 60
},
{
"epoch": 0.06,
"learning_rate": 0.0046088859896247685,
"loss": 1.3905,
"step": 65
},
{
"epoch": 0.07,
"learning_rate": 0.004782867026529596,
"loss": 1.2985,
"step": 70
},
{
"epoch": 0.07,
"learning_rate": 0.004950737714883372,
"loss": 1.1367,
"step": 75
},
{
"epoch": 0.08,
"learning_rate": 0.0051130999256491375,
"loss": 1.3175,
"step": 80
},
{
"epoch": 0.08,
"learning_rate": 0.005270462766947299,
"loss": 1.4444,
"step": 85
},
{
"epoch": 0.09,
"learning_rate": 0.0054232614454664055,
"loss": 1.2882,
"step": 90
},
{
"epoch": 0.09,
"learning_rate": 0.005571871466032479,
"loss": 1.4907,
"step": 95
},
{
"epoch": 0.1,
"learning_rate": 0.0057166195047502946,
"loss": 1.4369,
"step": 100
},
{
"epoch": 0.1,
"learning_rate": 0.005857791861290061,
"loss": 1.2819,
"step": 105
},
{
"epoch": 0.11,
"learning_rate": 0.005995641118204179,
"loss": 1.3525,
"step": 110
},
{
"epoch": 0.11,
"learning_rate": 0.006130391451319013,
"loss": 1.3266,
"step": 115
},
{
"epoch": 0.12,
"learning_rate": 0.006262242910851495,
"loss": 1.4452,
"step": 120
},
{
"epoch": 0.12,
"learning_rate": 0.006391374907061422,
"loss": 1.5723,
"step": 125
},
{
"epoch": 0.13,
"learning_rate": 0.006517949073958691,
"loss": 1.4178,
"step": 130
},
{
"epoch": 0.13,
"learning_rate": 0.0066421116415507145,
"loss": 1.4234,
"step": 135
},
{
"epoch": 0.14,
"learning_rate": 0.006763995415945232,
"loss": 1.5096,
"step": 140
},
{
"epoch": 0.14,
"learning_rate": 0.006883721443741945,
"loss": 1.3332,
"step": 145
},
{
"epoch": 0.15,
"learning_rate": 0.007001400420140049,
"loss": 1.5342,
"step": 150
},
{
"epoch": 0.15,
"learning_rate": 0.007117133887404524,
"loss": 1.5825,
"step": 155
},
{
"epoch": 0.16,
"learning_rate": 0.007231015260621873,
"loss": 1.3028,
"step": 160
},
{
"epoch": 0.16,
"learning_rate": 0.007343130710225101,
"loss": 1.5338,
"step": 165
},
{
"epoch": 0.17,
"learning_rate": 0.0074535599249993005,
"loss": 1.4564,
"step": 170
},
{
"epoch": 0.17,
"learning_rate": 0.007562376774775252,
"loss": 1.319,
"step": 175
},
{
"epoch": 0.18,
"learning_rate": 0.007669649888473706,
"loss": 1.6363,
"step": 180
},
{
"epoch": 0.18,
"learning_rate": 0.007775443160352296,
"loss": 1.4293,
"step": 185
},
{
"epoch": 0.19,
"learning_rate": 0.007879816195062792,
"loss": 1.4431,
"step": 190
},
{
"epoch": 0.19,
"learning_rate": 0.007982824700322464,
"loss": 1.3983,
"step": 195
},
{
"epoch": 0.2,
"learning_rate": 0.008084520834544433,
"loss": 1.448,
"step": 200
},
{
"epoch": 0.2,
"learning_rate": 0.008184953515585021,
"loss": 1.6799,
"step": 205
},
{
"epoch": 0.21,
"learning_rate": 0.008284168695795142,
"loss": 1.5823,
"step": 210
},
{
"epoch": 0.21,
"learning_rate": 0.008382209607764337,
"loss": 1.4564,
"step": 215
},
{
"epoch": 0.22,
"learning_rate": 0.008479116984486139,
"loss": 1.576,
"step": 220
},
{
"epoch": 0.22,
"learning_rate": 0.008574929257125443,
"loss": 1.5307,
"step": 225
},
{
"epoch": 0.23,
"learning_rate": 0.00866968273311143,
"loss": 1.4519,
"step": 230
},
{
"epoch": 0.23,
"learning_rate": 0.008763411756896685,
"loss": 1.6421,
"step": 235
},
{
"epoch": 0.24,
"learning_rate": 0.008856148855400954,
"loss": 1.321,
"step": 240
},
{
"epoch": 0.24,
"learning_rate": 0.008947924869885989,
"loss": 1.3645,
"step": 245
},
{
"epoch": 0.25,
"learning_rate": 0.009038769075777341,
"loss": 1.6528,
"step": 250
},
{
"epoch": 0.25,
"learning_rate": 0.00912870929175277,
"loss": 1.5315,
"step": 255
},
{
"epoch": 0.26,
"learning_rate": 0.009217771979249537,
"loss": 1.6313,
"step": 260
},
{
"epoch": 0.26,
"learning_rate": 0.00930598233339944,
"loss": 1.6657,
"step": 265
},
{
"epoch": 0.27,
"learning_rate": 0.009393364366277242,
"loss": 1.602,
"step": 270
},
{
"epoch": 0.27,
"learning_rate": 0.00947994098324202,
"loss": 1.5175,
"step": 275
},
{
"epoch": 0.28,
"learning_rate": 0.009565734053059192,
"loss": 1.4379,
"step": 280
},
{
"epoch": 0.28,
"learning_rate": 0.009650764472411541,
"loss": 1.5895,
"step": 285
},
{
"epoch": 0.29,
"learning_rate": 0.009735052225338362,
"loss": 1.9034,
"step": 290
},
{
"epoch": 0.29,
"learning_rate": 0.00981861643808179,
"loss": 1.5616,
"step": 295
},
{
"epoch": 0.3,
"learning_rate": 0.009901475429766745,
"loss": 1.5318,
"step": 300
},
{
"epoch": 0.3,
"learning_rate": 0.009983646759294876,
"loss": 1.6532,
"step": 305
},
{
"epoch": 0.31,
"learning_rate": 0.009976905542725173,
"loss": 1.6601,
"step": 310
},
{
"epoch": 0.31,
"learning_rate": 0.00994803747113164,
"loss": 1.9683,
"step": 315
},
{
"epoch": 0.32,
"learning_rate": 0.009919169399538106,
"loss": 1.694,
"step": 320
},
{
"epoch": 0.32,
"learning_rate": 0.009890301327944573,
"loss": 1.4985,
"step": 325
},
{
"epoch": 0.33,
"learning_rate": 0.009861433256351039,
"loss": 1.7896,
"step": 330
},
{
"epoch": 0.33,
"learning_rate": 0.009832565184757506,
"loss": 1.6075,
"step": 335
},
{
"epoch": 0.34,
"learning_rate": 0.009803697113163973,
"loss": 1.7854,
"step": 340
},
{
"epoch": 0.34,
"learning_rate": 0.009774829041570439,
"loss": 1.6752,
"step": 345
},
{
"epoch": 0.35,
"learning_rate": 0.009745960969976906,
"loss": 1.6439,
"step": 350
},
{
"epoch": 0.35,
"learning_rate": 0.009717092898383371,
"loss": 1.9989,
"step": 355
},
{
"epoch": 0.36,
"learning_rate": 0.009688224826789838,
"loss": 1.8198,
"step": 360
},
{
"epoch": 0.36,
"learning_rate": 0.009659356755196306,
"loss": 1.8466,
"step": 365
},
{
"epoch": 0.37,
"learning_rate": 0.009630488683602771,
"loss": 1.7034,
"step": 370
},
{
"epoch": 0.37,
"learning_rate": 0.009601620612009238,
"loss": 1.6235,
"step": 375
},
{
"epoch": 0.38,
"learning_rate": 0.009572752540415704,
"loss": 1.5958,
"step": 380
},
{
"epoch": 0.38,
"learning_rate": 0.009543884468822171,
"loss": 1.7292,
"step": 385
},
{
"epoch": 0.39,
"learning_rate": 0.009515016397228638,
"loss": 1.6004,
"step": 390
},
{
"epoch": 0.39,
"learning_rate": 0.009486148325635104,
"loss": 1.8302,
"step": 395
},
{
"epoch": 0.4,
"learning_rate": 0.009457280254041571,
"loss": 1.6322,
"step": 400
},
{
"epoch": 0.4,
"learning_rate": 0.009428412182448036,
"loss": 1.4802,
"step": 405
},
{
"epoch": 0.41,
"learning_rate": 0.009399544110854504,
"loss": 1.5544,
"step": 410
},
{
"epoch": 0.41,
"learning_rate": 0.009370676039260971,
"loss": 1.6265,
"step": 415
},
{
"epoch": 0.42,
"learning_rate": 0.009341807967667436,
"loss": 1.7169,
"step": 420
},
{
"epoch": 0.42,
"learning_rate": 0.009312939896073904,
"loss": 1.511,
"step": 425
},
{
"epoch": 0.43,
"learning_rate": 0.009284071824480369,
"loss": 1.63,
"step": 430
},
{
"epoch": 0.43,
"learning_rate": 0.009255203752886836,
"loss": 1.6076,
"step": 435
},
{
"epoch": 0.44,
"learning_rate": 0.009226335681293303,
"loss": 1.5973,
"step": 440
},
{
"epoch": 0.44,
"learning_rate": 0.009197467609699769,
"loss": 1.6757,
"step": 445
},
{
"epoch": 0.45,
"learning_rate": 0.009168599538106236,
"loss": 1.5146,
"step": 450
},
{
"epoch": 0.45,
"learning_rate": 0.009139731466512702,
"loss": 1.459,
"step": 455
},
{
"epoch": 0.46,
"learning_rate": 0.009110863394919169,
"loss": 1.4292,
"step": 460
},
{
"epoch": 0.46,
"learning_rate": 0.009081995323325636,
"loss": 1.7206,
"step": 465
},
{
"epoch": 0.47,
"learning_rate": 0.009053127251732102,
"loss": 1.491,
"step": 470
},
{
"epoch": 0.47,
"learning_rate": 0.009024259180138567,
"loss": 1.5426,
"step": 475
},
{
"epoch": 0.48,
"learning_rate": 0.008995391108545034,
"loss": 1.6884,
"step": 480
},
{
"epoch": 0.48,
"learning_rate": 0.008966523036951501,
"loss": 1.6555,
"step": 485
},
{
"epoch": 0.49,
"learning_rate": 0.008937654965357967,
"loss": 1.6036,
"step": 490
},
{
"epoch": 0.49,
"learning_rate": 0.008908786893764434,
"loss": 1.4405,
"step": 495
},
{
"epoch": 0.5,
"learning_rate": 0.008879918822170901,
"loss": 1.5134,
"step": 500
},
{
"epoch": 0.5,
"learning_rate": 0.008851050750577369,
"loss": 1.6087,
"step": 505
},
{
"epoch": 0.51,
"learning_rate": 0.008822182678983834,
"loss": 1.6898,
"step": 510
},
{
"epoch": 0.51,
"learning_rate": 0.0087933146073903,
"loss": 1.7172,
"step": 515
},
{
"epoch": 0.52,
"learning_rate": 0.008764446535796767,
"loss": 1.6621,
"step": 520
},
{
"epoch": 0.52,
"learning_rate": 0.008735578464203234,
"loss": 1.4144,
"step": 525
},
{
"epoch": 0.53,
"learning_rate": 0.008706710392609701,
"loss": 1.685,
"step": 530
},
{
"epoch": 0.53,
"learning_rate": 0.008677842321016167,
"loss": 1.6112,
"step": 535
},
{
"epoch": 0.54,
"learning_rate": 0.008648974249422632,
"loss": 1.5041,
"step": 540
},
{
"epoch": 0.54,
"learning_rate": 0.0086201061778291,
"loss": 1.5515,
"step": 545
},
{
"epoch": 0.55,
"learning_rate": 0.008591238106235567,
"loss": 1.711,
"step": 550
},
{
"epoch": 0.55,
"learning_rate": 0.008562370034642034,
"loss": 1.6166,
"step": 555
},
{
"epoch": 0.56,
"learning_rate": 0.0085335019630485,
"loss": 1.5324,
"step": 560
},
{
"epoch": 0.56,
"learning_rate": 0.008504633891454965,
"loss": 1.4422,
"step": 565
},
{
"epoch": 0.57,
"learning_rate": 0.008475765819861432,
"loss": 1.6069,
"step": 570
},
{
"epoch": 0.57,
"learning_rate": 0.008446897748267899,
"loss": 1.6231,
"step": 575
},
{
"epoch": 0.58,
"learning_rate": 0.008418029676674366,
"loss": 1.3719,
"step": 580
},
{
"epoch": 0.58,
"learning_rate": 0.008389161605080832,
"loss": 1.6578,
"step": 585
},
{
"epoch": 0.59,
"learning_rate": 0.008360293533487297,
"loss": 1.6525,
"step": 590
},
{
"epoch": 0.59,
"learning_rate": 0.008331425461893764,
"loss": 1.6094,
"step": 595
},
{
"epoch": 0.6,
"learning_rate": 0.008302557390300232,
"loss": 1.633,
"step": 600
},
{
"epoch": 0.6,
"learning_rate": 0.008273689318706697,
"loss": 1.7865,
"step": 605
},
{
"epoch": 0.61,
"learning_rate": 0.008244821247113164,
"loss": 1.602,
"step": 610
},
{
"epoch": 0.61,
"learning_rate": 0.00821595317551963,
"loss": 1.6723,
"step": 615
},
{
"epoch": 0.62,
"learning_rate": 0.008187085103926097,
"loss": 1.5495,
"step": 620
},
{
"epoch": 0.62,
"learning_rate": 0.008158217032332564,
"loss": 1.5716,
"step": 625
},
{
"epoch": 0.63,
"learning_rate": 0.00812934896073903,
"loss": 1.6036,
"step": 630
},
{
"epoch": 0.63,
"learning_rate": 0.008100480889145497,
"loss": 1.4638,
"step": 635
},
{
"epoch": 0.64,
"learning_rate": 0.008071612817551962,
"loss": 1.6912,
"step": 640
},
{
"epoch": 0.64,
"learning_rate": 0.00804274474595843,
"loss": 1.6037,
"step": 645
},
{
"epoch": 0.65,
"learning_rate": 0.008013876674364897,
"loss": 1.5457,
"step": 650
},
{
"epoch": 0.65,
"learning_rate": 0.007985008602771362,
"loss": 1.4561,
"step": 655
},
{
"epoch": 0.66,
"learning_rate": 0.00795614053117783,
"loss": 1.4871,
"step": 660
},
{
"epoch": 0.66,
"learning_rate": 0.007927272459584295,
"loss": 1.5253,
"step": 665
},
{
"epoch": 0.67,
"learning_rate": 0.007898404387990762,
"loss": 1.5775,
"step": 670
},
{
"epoch": 0.67,
"learning_rate": 0.00786953631639723,
"loss": 1.724,
"step": 675
},
{
"epoch": 0.68,
"learning_rate": 0.007840668244803695,
"loss": 1.6629,
"step": 680
},
{
"epoch": 0.68,
"learning_rate": 0.007811800173210163,
"loss": 1.519,
"step": 685
},
{
"epoch": 0.69,
"learning_rate": 0.00778293210161663,
"loss": 1.5684,
"step": 690
},
{
"epoch": 0.69,
"learning_rate": 0.007754064030023096,
"loss": 1.6109,
"step": 695
},
{
"epoch": 0.7,
"learning_rate": 0.007725195958429562,
"loss": 1.6378,
"step": 700
},
{
"epoch": 0.7,
"learning_rate": 0.007696327886836028,
"loss": 1.4673,
"step": 705
},
{
"epoch": 0.71,
"learning_rate": 0.007667459815242496,
"loss": 1.3671,
"step": 710
},
{
"epoch": 0.71,
"learning_rate": 0.007638591743648962,
"loss": 1.6971,
"step": 715
},
{
"epoch": 0.72,
"learning_rate": 0.007609723672055428,
"loss": 1.5656,
"step": 720
},
{
"epoch": 0.72,
"learning_rate": 0.007580855600461894,
"loss": 1.6135,
"step": 725
},
{
"epoch": 0.73,
"learning_rate": 0.007551987528868362,
"loss": 1.6247,
"step": 730
},
{
"epoch": 0.73,
"learning_rate": 0.007523119457274827,
"loss": 1.4691,
"step": 735
},
{
"epoch": 0.74,
"learning_rate": 0.007494251385681295,
"loss": 1.5108,
"step": 740
},
{
"epoch": 0.74,
"learning_rate": 0.007465383314087762,
"loss": 1.7138,
"step": 745
},
{
"epoch": 0.75,
"learning_rate": 0.007436515242494226,
"loss": 1.4466,
"step": 750
},
{
"epoch": 0.75,
"learning_rate": 0.007407647170900693,
"loss": 1.6503,
"step": 755
},
{
"epoch": 0.76,
"learning_rate": 0.00737877909930716,
"loss": 1.5161,
"step": 760
},
{
"epoch": 0.76,
"learning_rate": 0.007349911027713627,
"loss": 1.5638,
"step": 765
},
{
"epoch": 0.77,
"learning_rate": 0.007321042956120093,
"loss": 1.6321,
"step": 770
},
{
"epoch": 0.77,
"learning_rate": 0.007292174884526559,
"loss": 1.5957,
"step": 775
},
{
"epoch": 0.78,
"learning_rate": 0.0072633068129330245,
"loss": 1.4813,
"step": 780
},
{
"epoch": 0.78,
"learning_rate": 0.0072344387413394926,
"loss": 1.6003,
"step": 785
},
{
"epoch": 0.79,
"learning_rate": 0.00720557066974596,
"loss": 1.474,
"step": 790
},
{
"epoch": 0.79,
"learning_rate": 0.007176702598152426,
"loss": 1.6341,
"step": 795
},
{
"epoch": 0.8,
"learning_rate": 0.007147834526558892,
"loss": 1.6502,
"step": 800
},
{
"epoch": 0.8,
"learning_rate": 0.007118966454965358,
"loss": 1.4565,
"step": 805
},
{
"epoch": 0.81,
"learning_rate": 0.007090098383371824,
"loss": 1.4588,
"step": 810
},
{
"epoch": 0.81,
"learning_rate": 0.007061230311778292,
"loss": 1.5407,
"step": 815
},
{
"epoch": 0.82,
"learning_rate": 0.007032362240184759,
"loss": 1.3778,
"step": 820
},
{
"epoch": 0.82,
"learning_rate": 0.007003494168591224,
"loss": 1.5602,
"step": 825
},
{
"epoch": 0.83,
"learning_rate": 0.0069746260969976905,
"loss": 1.6861,
"step": 830
},
{
"epoch": 0.83,
"learning_rate": 0.006945758025404158,
"loss": 1.5511,
"step": 835
},
{
"epoch": 0.84,
"learning_rate": 0.006916889953810624,
"loss": 1.44,
"step": 840
},
{
"epoch": 0.84,
"learning_rate": 0.006888021882217091,
"loss": 1.4159,
"step": 845
},
{
"epoch": 0.85,
"learning_rate": 0.006859153810623558,
"loss": 1.6642,
"step": 850
},
{
"epoch": 0.85,
"learning_rate": 0.006830285739030023,
"loss": 1.461,
"step": 855
},
{
"epoch": 0.86,
"learning_rate": 0.00680141766743649,
"loss": 1.6532,
"step": 860
},
{
"epoch": 0.86,
"learning_rate": 0.0067725495958429575,
"loss": 1.5106,
"step": 865
},
{
"epoch": 0.87,
"learning_rate": 0.006743681524249424,
"loss": 1.564,
"step": 870
},
{
"epoch": 0.87,
"learning_rate": 0.00671481345265589,
"loss": 1.5954,
"step": 875
},
{
"epoch": 0.88,
"learning_rate": 0.006685945381062356,
"loss": 1.5075,
"step": 880
},
{
"epoch": 0.88,
"learning_rate": 0.006657077309468823,
"loss": 1.4821,
"step": 885
},
{
"epoch": 0.89,
"learning_rate": 0.006628209237875289,
"loss": 1.6993,
"step": 890
},
{
"epoch": 0.89,
"learning_rate": 0.006599341166281756,
"loss": 1.3872,
"step": 895
},
{
"epoch": 0.9,
"learning_rate": 0.006570473094688223,
"loss": 1.684,
"step": 900
},
{
"epoch": 0.9,
"learning_rate": 0.006541605023094689,
"loss": 1.3419,
"step": 905
},
{
"epoch": 0.91,
"learning_rate": 0.006512736951501155,
"loss": 1.3974,
"step": 910
},
{
"epoch": 0.91,
"learning_rate": 0.006483868879907622,
"loss": 1.5703,
"step": 915
},
{
"epoch": 0.92,
"learning_rate": 0.006455000808314089,
"loss": 1.5572,
"step": 920
},
{
"epoch": 0.92,
"learning_rate": 0.006426132736720555,
"loss": 1.5941,
"step": 925
},
{
"epoch": 0.93,
"learning_rate": 0.006397264665127021,
"loss": 1.706,
"step": 930
},
{
"epoch": 0.93,
"learning_rate": 0.006368396593533487,
"loss": 1.5107,
"step": 935
},
{
"epoch": 0.94,
"learning_rate": 0.0063395285219399545,
"loss": 1.4807,
"step": 940
},
{
"epoch": 0.94,
"learning_rate": 0.006310660450346422,
"loss": 1.3222,
"step": 945
},
{
"epoch": 0.95,
"learning_rate": 0.006281792378752889,
"loss": 1.4944,
"step": 950
},
{
"epoch": 0.95,
"learning_rate": 0.006252924307159354,
"loss": 1.4851,
"step": 955
},
{
"epoch": 0.96,
"learning_rate": 0.006224056235565821,
"loss": 1.3356,
"step": 960
},
{
"epoch": 0.96,
"learning_rate": 0.006195188163972287,
"loss": 1.5368,
"step": 965
},
{
"epoch": 0.97,
"learning_rate": 0.006166320092378754,
"loss": 1.4879,
"step": 970
},
{
"epoch": 0.97,
"learning_rate": 0.006137452020785221,
"loss": 1.7248,
"step": 975
},
{
"epoch": 0.98,
"learning_rate": 0.006108583949191687,
"loss": 1.5369,
"step": 980
},
{
"epoch": 0.98,
"learning_rate": 0.006079715877598152,
"loss": 1.6353,
"step": 985
},
{
"epoch": 0.99,
"learning_rate": 0.0060508478060046205,
"loss": 1.4272,
"step": 990
},
{
"epoch": 0.99,
"learning_rate": 0.006021979734411086,
"loss": 1.4956,
"step": 995
},
{
"epoch": 1.0,
"learning_rate": 0.005993111662817553,
"loss": 1.6992,
"step": 1000
},
{
"epoch": 1.0,
"learning_rate": 0.00596424359122402,
"loss": 1.49,
"step": 1005
},
{
"epoch": 1.01,
"learning_rate": 0.005935375519630485,
"loss": 1.5019,
"step": 1010
},
{
"epoch": 1.01,
"learning_rate": 0.005906507448036952,
"loss": 1.4958,
"step": 1015
},
{
"epoch": 1.02,
"learning_rate": 0.005877639376443419,
"loss": 1.5754,
"step": 1020
},
{
"epoch": 1.02,
"learning_rate": 0.005848771304849886,
"loss": 1.5251,
"step": 1025
},
{
"epoch": 1.03,
"learning_rate": 0.005819903233256352,
"loss": 1.4752,
"step": 1030
},
{
"epoch": 1.03,
"learning_rate": 0.005791035161662818,
"loss": 1.4888,
"step": 1035
},
{
"epoch": 1.04,
"learning_rate": 0.005762167090069283,
"loss": 1.7421,
"step": 1040
},
{
"epoch": 1.04,
"learning_rate": 0.005733299018475752,
"loss": 1.4161,
"step": 1045
},
{
"epoch": 1.05,
"learning_rate": 0.005704430946882218,
"loss": 1.5144,
"step": 1050
},
{
"epoch": 1.05,
"learning_rate": 0.005675562875288685,
"loss": 1.4593,
"step": 1055
},
{
"epoch": 1.06,
"learning_rate": 0.00564669480369515,
"loss": 1.5569,
"step": 1060
},
{
"epoch": 1.06,
"learning_rate": 0.0056178267321016166,
"loss": 1.699,
"step": 1065
},
{
"epoch": 1.07,
"learning_rate": 0.005588958660508083,
"loss": 1.382,
"step": 1070
},
{
"epoch": 1.07,
"learning_rate": 0.005560090588914552,
"loss": 1.5344,
"step": 1075
},
{
"epoch": 1.08,
"learning_rate": 0.005531222517321017,
"loss": 1.5147,
"step": 1080
},
{
"epoch": 1.08,
"learning_rate": 0.005502354445727483,
"loss": 1.483,
"step": 1085
},
{
"epoch": 1.09,
"learning_rate": 0.005473486374133949,
"loss": 1.4432,
"step": 1090
},
{
"epoch": 1.09,
"learning_rate": 0.005444618302540416,
"loss": 1.4741,
"step": 1095
},
{
"epoch": 1.1,
"learning_rate": 0.005415750230946883,
"loss": 1.3889,
"step": 1100
},
{
"epoch": 1.1,
"learning_rate": 0.00538688215935335,
"loss": 1.5193,
"step": 1105
},
{
"epoch": 1.11,
"learning_rate": 0.005358014087759816,
"loss": 1.3495,
"step": 1110
},
{
"epoch": 1.11,
"learning_rate": 0.005329146016166282,
"loss": 1.4738,
"step": 1115
},
{
"epoch": 1.12,
"learning_rate": 0.005300277944572749,
"loss": 1.4875,
"step": 1120
},
{
"epoch": 1.12,
"learning_rate": 0.005271409872979214,
"loss": 1.5795,
"step": 1125
},
{
"epoch": 1.13,
"learning_rate": 0.0052425418013856825,
"loss": 1.5649,
"step": 1130
},
{
"epoch": 1.13,
"learning_rate": 0.005213673729792149,
"loss": 1.3385,
"step": 1135
},
{
"epoch": 1.14,
"learning_rate": 0.005184805658198614,
"loss": 1.6503,
"step": 1140
},
{
"epoch": 1.14,
"learning_rate": 0.005155937586605081,
"loss": 1.4296,
"step": 1145
},
{
"epoch": 1.15,
"learning_rate": 0.005127069515011548,
"loss": 1.4508,
"step": 1150
},
{
"epoch": 1.15,
"learning_rate": 0.005098201443418014,
"loss": 1.6112,
"step": 1155
},
{
"epoch": 1.16,
"learning_rate": 0.0050693333718244814,
"loss": 1.4412,
"step": 1160
},
{
"epoch": 1.16,
"learning_rate": 0.005040465300230946,
"loss": 1.3971,
"step": 1165
},
{
"epoch": 1.17,
"learning_rate": 0.005011597228637413,
"loss": 1.5099,
"step": 1170
},
{
"epoch": 1.17,
"learning_rate": 0.0049827291570438805,
"loss": 1.5568,
"step": 1175
},
{
"epoch": 1.18,
"learning_rate": 0.004953861085450347,
"loss": 1.5243,
"step": 1180
},
{
"epoch": 1.18,
"learning_rate": 0.004924993013856814,
"loss": 1.2718,
"step": 1185
},
{
"epoch": 1.19,
"learning_rate": 0.0048961249422632795,
"loss": 1.6473,
"step": 1190
},
{
"epoch": 1.19,
"learning_rate": 0.004867256870669747,
"loss": 1.5639,
"step": 1195
},
{
"epoch": 1.2,
"learning_rate": 0.004838388799076213,
"loss": 1.3558,
"step": 1200
},
{
"epoch": 1.2,
"learning_rate": 0.004809520727482679,
"loss": 1.4211,
"step": 1205
},
{
"epoch": 1.21,
"learning_rate": 0.004780652655889147,
"loss": 1.5578,
"step": 1210
},
{
"epoch": 1.21,
"learning_rate": 0.004751784584295613,
"loss": 1.6661,
"step": 1215
},
{
"epoch": 1.22,
"learning_rate": 0.0047229165127020785,
"loss": 1.3678,
"step": 1220
},
{
"epoch": 1.22,
"learning_rate": 0.004694048441108546,
"loss": 1.4871,
"step": 1225
},
{
"epoch": 1.23,
"learning_rate": 0.004665180369515012,
"loss": 1.4524,
"step": 1230
},
{
"epoch": 1.23,
"learning_rate": 0.004636312297921479,
"loss": 1.4723,
"step": 1235
},
{
"epoch": 1.24,
"learning_rate": 0.004607444226327946,
"loss": 1.3289,
"step": 1240
},
{
"epoch": 1.24,
"learning_rate": 0.004578576154734411,
"loss": 1.5643,
"step": 1245
},
{
"epoch": 1.25,
"learning_rate": 0.004549708083140878,
"loss": 1.2773,
"step": 1250
},
{
"epoch": 1.25,
"learning_rate": 0.004520840011547345,
"loss": 1.4601,
"step": 1255
},
{
"epoch": 1.26,
"learning_rate": 0.004491971939953812,
"loss": 1.2864,
"step": 1260
},
{
"epoch": 1.26,
"learning_rate": 0.004463103868360278,
"loss": 1.3144,
"step": 1265
},
{
"epoch": 1.27,
"learning_rate": 0.004434235796766744,
"loss": 1.4182,
"step": 1270
},
{
"epoch": 1.27,
"learning_rate": 0.004405367725173211,
"loss": 1.5354,
"step": 1275
},
{
"epoch": 1.28,
"learning_rate": 0.004376499653579677,
"loss": 1.4776,
"step": 1280
},
{
"epoch": 1.28,
"learning_rate": 0.0043476315819861435,
"loss": 1.5618,
"step": 1285
},
{
"epoch": 1.29,
"learning_rate": 0.004318763510392611,
"loss": 1.3853,
"step": 1290
},
{
"epoch": 1.29,
"learning_rate": 0.004289895438799076,
"loss": 1.4081,
"step": 1295
},
{
"epoch": 1.3,
"learning_rate": 0.0042610273672055434,
"loss": 1.2231,
"step": 1300
},
{
"epoch": 1.3,
"learning_rate": 0.00423215929561201,
"loss": 1.4096,
"step": 1305
},
{
"epoch": 1.31,
"learning_rate": 0.004203291224018476,
"loss": 1.724,
"step": 1310
},
{
"epoch": 1.31,
"learning_rate": 0.004174423152424943,
"loss": 1.2597,
"step": 1315
},
{
"epoch": 1.32,
"learning_rate": 0.004145555080831409,
"loss": 1.3268,
"step": 1320
},
{
"epoch": 1.32,
"learning_rate": 0.004116687009237876,
"loss": 1.3372,
"step": 1325
},
{
"epoch": 1.33,
"learning_rate": 0.004087818937644342,
"loss": 1.4024,
"step": 1330
},
{
"epoch": 1.33,
"learning_rate": 0.004058950866050809,
"loss": 1.3837,
"step": 1335
},
{
"epoch": 1.34,
"learning_rate": 0.004030082794457276,
"loss": 1.4427,
"step": 1340
},
{
"epoch": 1.34,
"learning_rate": 0.004001214722863742,
"loss": 1.5162,
"step": 1345
},
{
"epoch": 1.35,
"learning_rate": 0.003972346651270208,
"loss": 1.5025,
"step": 1350
},
{
"epoch": 1.35,
"learning_rate": 0.003943478579676675,
"loss": 1.3713,
"step": 1355
},
{
"epoch": 1.36,
"learning_rate": 0.003914610508083141,
"loss": 1.2259,
"step": 1360
},
{
"epoch": 1.36,
"learning_rate": 0.003885742436489608,
"loss": 1.4999,
"step": 1365
},
{
"epoch": 1.37,
"learning_rate": 0.003856874364896074,
"loss": 1.3068,
"step": 1370
},
{
"epoch": 1.37,
"learning_rate": 0.003828006293302541,
"loss": 1.4383,
"step": 1375
},
{
"epoch": 1.38,
"learning_rate": 0.003799138221709007,
"loss": 1.3846,
"step": 1380
},
{
"epoch": 1.38,
"learning_rate": 0.0037702701501154735,
"loss": 1.4892,
"step": 1385
},
{
"epoch": 1.39,
"learning_rate": 0.0037414020785219403,
"loss": 1.4041,
"step": 1390
},
{
"epoch": 1.39,
"learning_rate": 0.003712534006928406,
"loss": 1.6152,
"step": 1395
},
{
"epoch": 1.4,
"learning_rate": 0.003683665935334873,
"loss": 1.3959,
"step": 1400
},
{
"epoch": 1.4,
"learning_rate": 0.00365479786374134,
"loss": 1.2466,
"step": 1405
},
{
"epoch": 1.41,
"learning_rate": 0.003625929792147806,
"loss": 1.4626,
"step": 1410
},
{
"epoch": 1.41,
"learning_rate": 0.003597061720554272,
"loss": 1.1847,
"step": 1415
},
{
"epoch": 1.42,
"learning_rate": 0.003568193648960739,
"loss": 1.3083,
"step": 1420
},
{
"epoch": 1.42,
"learning_rate": 0.0035393255773672055,
"loss": 1.3253,
"step": 1425
},
{
"epoch": 1.43,
"learning_rate": 0.0035104575057736728,
"loss": 1.372,
"step": 1430
},
{
"epoch": 1.43,
"learning_rate": 0.0034815894341801387,
"loss": 1.4528,
"step": 1435
},
{
"epoch": 1.44,
"learning_rate": 0.003452721362586605,
"loss": 1.5115,
"step": 1440
},
{
"epoch": 1.44,
"learning_rate": 0.003423853290993072,
"loss": 1.3855,
"step": 1445
},
{
"epoch": 1.45,
"learning_rate": 0.0033949852193995377,
"loss": 1.2547,
"step": 1450
},
{
"epoch": 1.45,
"learning_rate": 0.0033661171478060045,
"loss": 1.4584,
"step": 1455
},
{
"epoch": 1.46,
"learning_rate": 0.0033372490762124713,
"loss": 1.2172,
"step": 1460
},
{
"epoch": 1.46,
"learning_rate": 0.0033083810046189376,
"loss": 1.3224,
"step": 1465
},
{
"epoch": 1.47,
"learning_rate": 0.003279512933025405,
"loss": 1.5014,
"step": 1470
},
{
"epoch": 1.47,
"learning_rate": 0.0032506448614318707,
"loss": 1.4566,
"step": 1475
},
{
"epoch": 1.48,
"learning_rate": 0.0032217767898383366,
"loss": 1.5488,
"step": 1480
},
{
"epoch": 1.48,
"learning_rate": 0.0031929087182448043,
"loss": 1.3555,
"step": 1485
},
{
"epoch": 1.49,
"learning_rate": 0.00316404064665127,
"loss": 1.488,
"step": 1490
},
{
"epoch": 1.49,
"learning_rate": 0.0031351725750577374,
"loss": 1.3927,
"step": 1495
},
{
"epoch": 1.5,
"learning_rate": 0.0031063045034642033,
"loss": 1.1759,
"step": 1500
},
{
"epoch": 1.5,
"learning_rate": 0.00307743643187067,
"loss": 1.4156,
"step": 1505
},
{
"epoch": 1.51,
"learning_rate": 0.0030485683602771364,
"loss": 1.3284,
"step": 1510
},
{
"epoch": 1.51,
"learning_rate": 0.003019700288683603,
"loss": 1.5301,
"step": 1515
},
{
"epoch": 1.52,
"learning_rate": 0.0029908322170900687,
"loss": 1.4688,
"step": 1520
},
{
"epoch": 1.52,
"learning_rate": 0.002961964145496536,
"loss": 1.3861,
"step": 1525
},
{
"epoch": 1.53,
"learning_rate": 0.0029330960739030023,
"loss": 1.3078,
"step": 1530
},
{
"epoch": 1.53,
"learning_rate": 0.00290422800230947,
"loss": 1.5033,
"step": 1535
},
{
"epoch": 1.54,
"learning_rate": 0.0028753599307159354,
"loss": 1.3179,
"step": 1540
},
{
"epoch": 1.54,
"learning_rate": 0.0028464918591224013,
"loss": 1.3151,
"step": 1545
},
{
"epoch": 1.55,
"learning_rate": 0.0028176237875288685,
"loss": 1.2228,
"step": 1550
},
{
"epoch": 1.55,
"learning_rate": 0.002788755715935335,
"loss": 1.4193,
"step": 1555
},
{
"epoch": 1.56,
"learning_rate": 0.002759887644341802,
"loss": 1.5102,
"step": 1560
},
{
"epoch": 1.56,
"learning_rate": 0.002731019572748268,
"loss": 1.4108,
"step": 1565
},
{
"epoch": 1.57,
"learning_rate": 0.0027021515011547343,
"loss": 1.6702,
"step": 1570
},
{
"epoch": 1.57,
"learning_rate": 0.002673283429561201,
"loss": 1.4893,
"step": 1575
},
{
"epoch": 1.58,
"learning_rate": 0.002644415357967667,
"loss": 1.5393,
"step": 1580
},
{
"epoch": 1.58,
"learning_rate": 0.002615547286374133,
"loss": 1.3174,
"step": 1585
},
{
"epoch": 1.59,
"learning_rate": 0.0025866792147806006,
"loss": 1.5737,
"step": 1590
},
{
"epoch": 1.59,
"learning_rate": 0.002557811143187067,
"loss": 1.3413,
"step": 1595
},
{
"epoch": 1.6,
"learning_rate": 0.002528943071593534,
"loss": 1.4321,
"step": 1600
},
{
"epoch": 1.6,
"learning_rate": 0.002500075,
"loss": 1.5292,
"step": 1605
},
{
"epoch": 1.61,
"learning_rate": 0.002471206928406466,
"loss": 1.1747,
"step": 1610
},
{
"epoch": 1.61,
"learning_rate": 0.002442338856812933,
"loss": 1.3613,
"step": 1615
},
{
"epoch": 1.62,
"learning_rate": 0.0024134707852193995,
"loss": 1.4922,
"step": 1620
},
{
"epoch": 1.62,
"learning_rate": 0.0023846027136258667,
"loss": 1.4311,
"step": 1625
},
{
"epoch": 1.63,
"learning_rate": 0.0023557346420323326,
"loss": 1.4672,
"step": 1630
},
{
"epoch": 1.63,
"learning_rate": 0.002326866570438799,
"loss": 1.3726,
"step": 1635
},
{
"epoch": 1.64,
"learning_rate": 0.0022979984988452658,
"loss": 1.2646,
"step": 1640
},
{
"epoch": 1.64,
"learning_rate": 0.002269130427251732,
"loss": 1.2819,
"step": 1645
},
{
"epoch": 1.65,
"learning_rate": 0.002240262355658198,
"loss": 1.3917,
"step": 1650
},
{
"epoch": 1.65,
"learning_rate": 0.0022113942840646652,
"loss": 1.3339,
"step": 1655
},
{
"epoch": 1.66,
"learning_rate": 0.0021825262124711316,
"loss": 1.439,
"step": 1660
},
{
"epoch": 1.66,
"learning_rate": 0.002153658140877599,
"loss": 1.3474,
"step": 1665
},
{
"epoch": 1.67,
"learning_rate": 0.0021247900692840647,
"loss": 1.1514,
"step": 1670
},
{
"epoch": 1.67,
"learning_rate": 0.0020959219976905306,
"loss": 1.3472,
"step": 1675
},
{
"epoch": 1.68,
"learning_rate": 0.002067053926096998,
"loss": 1.3059,
"step": 1680
},
{
"epoch": 1.68,
"learning_rate": 0.002038185854503464,
"loss": 1.4306,
"step": 1685
},
{
"epoch": 1.69,
"learning_rate": 0.0020093177829099314,
"loss": 1.4762,
"step": 1690
},
{
"epoch": 1.69,
"learning_rate": 0.0019804497113163973,
"loss": 1.2309,
"step": 1695
},
{
"epoch": 1.7,
"learning_rate": 0.0019515816397228636,
"loss": 1.4477,
"step": 1700
},
{
"epoch": 1.7,
"learning_rate": 0.0019227135681293308,
"loss": 1.3584,
"step": 1705
},
{
"epoch": 1.71,
"learning_rate": 0.001893845496535797,
"loss": 1.2721,
"step": 1710
},
{
"epoch": 1.71,
"learning_rate": 0.0018649774249422629,
"loss": 1.3599,
"step": 1715
},
{
"epoch": 1.72,
"learning_rate": 0.0018361093533487299,
"loss": 1.3039,
"step": 1720
},
{
"epoch": 1.72,
"learning_rate": 0.0018072412817551962,
"loss": 1.4414,
"step": 1725
},
{
"epoch": 1.73,
"learning_rate": 0.0017783732101616634,
"loss": 1.3903,
"step": 1730
},
{
"epoch": 1.73,
"learning_rate": 0.0017495051385681296,
"loss": 1.4272,
"step": 1735
},
{
"epoch": 1.74,
"learning_rate": 0.0017206370669745957,
"loss": 1.3649,
"step": 1740
},
{
"epoch": 1.74,
"learning_rate": 0.0016917689953810627,
"loss": 1.2328,
"step": 1745
},
{
"epoch": 1.75,
"learning_rate": 0.001662900923787529,
"loss": 1.2621,
"step": 1750
},
{
"epoch": 1.75,
"learning_rate": 0.001634032852193996,
"loss": 1.2631,
"step": 1755
},
{
"epoch": 1.76,
"learning_rate": 0.0016051647806004622,
"loss": 1.3229,
"step": 1760
},
{
"epoch": 1.76,
"learning_rate": 0.0015762967090069283,
"loss": 1.4429,
"step": 1765
},
{
"epoch": 1.77,
"learning_rate": 0.0015474286374133955,
"loss": 1.3552,
"step": 1770
},
{
"epoch": 1.77,
"learning_rate": 0.0015185605658198618,
"loss": 1.3257,
"step": 1775
},
{
"epoch": 1.78,
"learning_rate": 0.0014896924942263275,
"loss": 1.1749,
"step": 1780
},
{
"epoch": 1.78,
"learning_rate": 0.0014608244226327945,
"loss": 1.2705,
"step": 1785
},
{
"epoch": 1.79,
"learning_rate": 0.0014319563510392609,
"loss": 1.4362,
"step": 1790
},
{
"epoch": 1.79,
"learning_rate": 0.001403088279445728,
"loss": 1.1888,
"step": 1795
},
{
"epoch": 1.8,
"learning_rate": 0.0013742202078521942,
"loss": 1.3183,
"step": 1800
},
{
"epoch": 1.8,
"learning_rate": 0.0013453521362586604,
"loss": 1.4149,
"step": 1805
},
{
"epoch": 1.81,
"learning_rate": 0.0013164840646651273,
"loss": 1.2567,
"step": 1810
},
{
"epoch": 1.81,
"learning_rate": 0.0012876159930715937,
"loss": 1.251,
"step": 1815
},
{
"epoch": 1.82,
"learning_rate": 0.0012587479214780596,
"loss": 1.1756,
"step": 1820
},
{
"epoch": 1.82,
"learning_rate": 0.0012298798498845268,
"loss": 1.4292,
"step": 1825
},
{
"epoch": 1.83,
"learning_rate": 0.001201011778290993,
"loss": 1.354,
"step": 1830
},
{
"epoch": 1.83,
"learning_rate": 0.0011721437066974602,
"loss": 1.2577,
"step": 1835
},
{
"epoch": 1.84,
"learning_rate": 0.0011432756351039263,
"loss": 1.3255,
"step": 1840
},
{
"epoch": 1.84,
"learning_rate": 0.0011144075635103924,
"loss": 1.1484,
"step": 1845
},
{
"epoch": 1.85,
"learning_rate": 0.0010855394919168594,
"loss": 1.3225,
"step": 1850
},
{
"epoch": 1.85,
"learning_rate": 0.0010566714203233255,
"loss": 1.4918,
"step": 1855
},
{
"epoch": 1.86,
"learning_rate": 0.0010278033487297927,
"loss": 1.2976,
"step": 1860
},
{
"epoch": 1.86,
"learning_rate": 0.0009989352771362589,
"loss": 1.2532,
"step": 1865
},
{
"epoch": 1.87,
"learning_rate": 0.000970067205542725,
"loss": 1.3491,
"step": 1870
},
{
"epoch": 1.87,
"learning_rate": 0.000941199133949192,
"loss": 1.3206,
"step": 1875
},
{
"epoch": 1.88,
"learning_rate": 0.0009123310623556581,
"loss": 1.4938,
"step": 1880
},
{
"epoch": 1.88,
"learning_rate": 0.0008834629907621243,
"loss": 1.424,
"step": 1885
},
{
"epoch": 1.89,
"learning_rate": 0.0008545949191685913,
"loss": 1.3984,
"step": 1890
},
{
"epoch": 1.89,
"learning_rate": 0.0008257268475750575,
"loss": 1.1661,
"step": 1895
},
{
"epoch": 1.9,
"learning_rate": 0.0007968587759815248,
"loss": 1.4248,
"step": 1900
},
{
"epoch": 1.9,
"learning_rate": 0.0007679907043879908,
"loss": 1.357,
"step": 1905
},
{
"epoch": 1.91,
"learning_rate": 0.0007391226327944569,
"loss": 1.269,
"step": 1910
},
{
"epoch": 1.91,
"learning_rate": 0.0007102545612009241,
"loss": 1.3927,
"step": 1915
},
{
"epoch": 1.92,
"learning_rate": 0.0006813864896073901,
"loss": 1.2803,
"step": 1920
},
{
"epoch": 1.92,
"learning_rate": 0.0006525184180138573,
"loss": 1.5049,
"step": 1925
},
{
"epoch": 1.93,
"learning_rate": 0.0006236503464203234,
"loss": 1.3776,
"step": 1930
},
{
"epoch": 1.93,
"learning_rate": 0.0005947822748267896,
"loss": 1.3658,
"step": 1935
},
{
"epoch": 1.94,
"learning_rate": 0.0005659142032332567,
"loss": 1.0963,
"step": 1940
},
{
"epoch": 1.94,
"learning_rate": 0.0005370461316397228,
"loss": 1.4584,
"step": 1945
},
{
"epoch": 1.95,
"learning_rate": 0.0005081780600461889,
"loss": 1.2742,
"step": 1950
},
{
"epoch": 1.95,
"learning_rate": 0.0004793099884526561,
"loss": 1.1752,
"step": 1955
},
{
"epoch": 1.96,
"learning_rate": 0.0004504419168591222,
"loss": 1.1821,
"step": 1960
},
{
"epoch": 1.96,
"learning_rate": 0.0004215738452655894,
"loss": 1.3403,
"step": 1965
},
{
"epoch": 1.97,
"learning_rate": 0.00039270577367205543,
"loss": 1.449,
"step": 1970
},
{
"epoch": 1.97,
"learning_rate": 0.00036383770207852156,
"loss": 1.1671,
"step": 1975
},
{
"epoch": 1.98,
"learning_rate": 0.00033496963048498877,
"loss": 1.4356,
"step": 1980
},
{
"epoch": 1.98,
"learning_rate": 0.00030610155889145485,
"loss": 1.2352,
"step": 1985
},
{
"epoch": 1.99,
"learning_rate": 0.00027723348729792206,
"loss": 1.3243,
"step": 1990
},
{
"epoch": 1.99,
"learning_rate": 0.00024836541570438813,
"loss": 1.2791,
"step": 1995
},
{
"epoch": 2.0,
"learning_rate": 0.0002194973441108542,
"loss": 1.2112,
"step": 2000
},
{
"epoch": 2.0,
"learning_rate": 0.0001906292725173214,
"loss": 1.1661,
"step": 2005
},
{
"epoch": 2.01,
"learning_rate": 0.0001617612009237875,
"loss": 1.4046,
"step": 2010
},
{
"epoch": 2.01,
"learning_rate": 0.00013289312933025357,
"loss": 1.2514,
"step": 2015
},
{
"epoch": 2.02,
"learning_rate": 0.00010402505773672078,
"loss": 1.1908,
"step": 2020
},
{
"epoch": 2.02,
"learning_rate": 7.515698614318685e-05,
"loss": 1.3698,
"step": 2025
},
{
"epoch": 2.03,
"learning_rate": 4.628891454965405e-05,
"loss": 1.5172,
"step": 2030
},
{
"epoch": 2.03,
"learning_rate": 1.7420842956120128e-05,
"loss": 1.3045,
"step": 2035
},
{
"epoch": 2.04,
"step": 2038,
"total_flos": 1.1512671589937643e+23,
"train_loss": 1.4677173125030247,
"train_runtime": 22959.9641,
"train_samples_per_second": 1454.296,
"train_steps_per_second": 0.089
}
],
"max_steps": 2038,
"num_train_epochs": 3,
"start_time": 1669316150.4760864,
"total_flos": 1.1512671589937643e+23,
"trial_name": null,
"trial_params": null
}
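
A minimal sketch of how this file could be inspected, assuming a local copy saved as trainer_state.json and that matplotlib is installed; it relies only on the keys visible above (log_history, step, loss, learning_rate, global_step) and is not part of the original repository.

import json
import matplotlib.pyplot as plt  # assumption: matplotlib available for plotting

# Load the trainer state exported by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Every log_history entry carries step/loss/learning_rate except the final
# summary entry, which instead reports train_runtime and train_loss.
points = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in points]
losses = [e["loss"] for e in points]

print(f"logged points: {len(points)}, final step: {state['global_step']}")

# Plot training loss against optimizer step.
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("groupbert-base-uncased training loss")
plt.show()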