groupbert-base-uncased / trainer_state.json
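Below is the raw trainer_state.json produced by the Hugging Face Trainer during training. As a minimal sketch (not part of the original file, and assuming a local copy saved as "trainer_state.json"), the log could be inspected with Python's standard json module:

    # Sketch: read the Trainer log from a local trainer_state.json copy.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Each log_history entry records the (rounded) epoch, learning rate,
    # training loss, and global step, as seen in the file below.
    for entry in state["log_history"]:
        print(entry["step"], entry.get("learning_rate"), entry.get("loss"))
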
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.035964035964036,
"global_step": 2038,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0013736056394868903,
"loss": 1.4723,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.0019425717247145285,
"loss": 1.568,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 0.0023791547571544325,
"loss": 1.2706,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 0.0027472112789737805,
"loss": 1.3258,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 0.003071475584169756,
"loss": 1.2558,
"step": 25
},
{
"epoch": 0.03,
"learning_rate": 0.0033646329245522657,
"loss": 0.8074,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 0.0036342189215581556,
"loss": 1.4713,
"step": 35
},
{
"epoch": 0.04,
"learning_rate": 0.003885143449429057,
"loss": 1.2681,
"step": 40
},
{
"epoch": 0.04,
"learning_rate": 0.004120816918460671,
"loss": 1.2291,
"step": 45
},
{
"epoch": 0.05,
"learning_rate": 0.004343722427630694,
"loss": 1.306,
"step": 50
},
{
"epoch": 0.05,
"learning_rate": 0.004555734516094203,
"loss": 1.1953,
"step": 55
},
{
"epoch": 0.06,
"learning_rate": 0.004758309514308865,
"loss": 1.2176,
"step": 60
},
{
"epoch": 0.06,
"learning_rate": 0.0049526055654364855,
"loss": 1.2338,
"step": 65
},
{
"epoch": 0.07,
"learning_rate": 0.005139561687500467,
"loss": 1.381,
"step": 70
},
{
"epoch": 0.07,
"learning_rate": 0.005319951765989316,
"loss": 1.5546,
"step": 75
},
{
"epoch": 0.08,
"learning_rate": 0.005494422557947561,
"loss": 1.5126,
"step": 80
},
{
"epoch": 0.08,
"learning_rate": 0.005663521139548541,
"loss": 1.5132,
"step": 85
},
{
"epoch": 0.09,
"learning_rate": 0.005827715174143585,
"loss": 1.2736,
"step": 90
},
{
"epoch": 0.09,
"learning_rate": 0.005987408170800916,
"loss": 1.1807,
"step": 95
},
{
"epoch": 0.1,
"learning_rate": 0.006142951168339512,
"loss": 1.392,
"step": 100
},
{
"epoch": 0.1,
"learning_rate": 0.0062946518179668965,
"loss": 1.4068,
"step": 105
},
{
"epoch": 0.11,
"learning_rate": 0.0064427815392316505,
"loss": 1.2436,
"step": 110
},
{
"epoch": 0.11,
"learning_rate": 0.0065875812264513545,
"loss": 1.6382,
"step": 115
},
{
"epoch": 0.12,
"learning_rate": 0.006729265849104531,
"loss": 1.5855,
"step": 120
},
{
"epoch": 0.12,
"learning_rate": 0.006868028197434452,
"loss": 1.8619,
"step": 125
},
{
"epoch": 0.13,
"learning_rate": 0.007004041959724749,
"loss": 1.4162,
"step": 130
},
{
"epoch": 0.13,
"learning_rate": 0.007137464271463298,
"loss": 1.397,
"step": 135
},
{
"epoch": 0.14,
"learning_rate": 0.007268437843116311,
"loss": 1.376,
"step": 140
},
{
"epoch": 0.14,
"learning_rate": 0.0073970927486462865,
"loss": 1.5437,
"step": 145
},
{
"epoch": 0.15,
"learning_rate": 0.007523547938632788,
"loss": 1.8505,
"step": 150
},
{
"epoch": 0.15,
"learning_rate": 0.0076479125281174514,
"loss": 1.3138,
"step": 155
},
{
"epoch": 0.16,
"learning_rate": 0.007770286898858114,
"loss": 1.6157,
"step": 160
},
{
"epoch": 0.16,
"learning_rate": 0.00789076364767037,
"loss": 1.4061,
"step": 165
},
{
"epoch": 0.17,
"learning_rate": 0.008009428406336274,
"loss": 1.4829,
"step": 170
},
{
"epoch": 0.17,
"learning_rate": 0.008126360553720012,
"loss": 1.3897,
"step": 175
},
{
"epoch": 0.18,
"learning_rate": 0.008241633836921341,
"loss": 1.3801,
"step": 180
},
{
"epoch": 0.18,
"learning_rate": 0.008355316915277182,
"loss": 1.6133,
"step": 185
},
{
"epoch": 0.19,
"learning_rate": 0.008467473838610143,
"loss": 1.6826,
"step": 190
},
{
"epoch": 0.19,
"learning_rate": 0.008578164469184382,
"loss": 1.6101,
"step": 195
},
{
"epoch": 0.2,
"learning_rate": 0.008687444855261389,
"loss": 1.7164,
"step": 200
},
{
"epoch": 0.2,
"learning_rate": 0.008795367562872955,
"loss": 1.6603,
"step": 205
},
{
"epoch": 0.21,
"learning_rate": 0.008901981971385245,
"loss": 1.3604,
"step": 210
},
{
"epoch": 0.21,
"learning_rate": 0.009007334537569819,
"loss": 1.5517,
"step": 215
},
{
"epoch": 0.22,
"learning_rate": 0.009111469032188405,
"loss": 1.6048,
"step": 220
},
{
"epoch": 0.22,
"learning_rate": 0.009214426752509268,
"loss": 1.6281,
"step": 225
},
{
"epoch": 0.23,
"learning_rate": 0.009316246713681893,
"loss": 2.1204,
"step": 230
},
{
"epoch": 0.23,
"learning_rate": 0.009416965821485117,
"loss": 1.6439,
"step": 235
},
{
"epoch": 0.24,
"learning_rate": 0.00951661902861773,
"loss": 1.6982,
"step": 240
},
{
"epoch": 0.24,
"learning_rate": 0.009615239476408233,
"loss": 1.857,
"step": 245
},
{
"epoch": 0.25,
"learning_rate": 0.009712858623572643,
"loss": 1.5612,
"step": 250
},
{
"epoch": 0.25,
"learning_rate": 0.009809506363438459,
"loss": 1.4843,
"step": 255
},
{
"epoch": 0.26,
"learning_rate": 0.009905211130872971,
"loss": 1.81,
"step": 260
},
{
"epoch": 0.26,
"learning_rate": 0.01,
"loss": 1.5551,
"step": 265
},
{
"epoch": 0.27,
"learning_rate": 0.009971799492385787,
"loss": 1.8817,
"step": 270
},
{
"epoch": 0.27,
"learning_rate": 0.009943598984771575,
"loss": 1.7807,
"step": 275
},
{
"epoch": 0.28,
"learning_rate": 0.009915398477157361,
"loss": 1.7788,
"step": 280
},
{
"epoch": 0.28,
"learning_rate": 0.009887197969543148,
"loss": 1.9268,
"step": 285
},
{
"epoch": 0.29,
"learning_rate": 0.009858997461928934,
"loss": 1.767,
"step": 290
},
{
"epoch": 0.29,
"learning_rate": 0.00983079695431472,
"loss": 2.0681,
"step": 295
},
{
"epoch": 0.3,
"learning_rate": 0.009802596446700508,
"loss": 1.8078,
"step": 300
},
{
"epoch": 0.3,
"learning_rate": 0.009774395939086295,
"loss": 1.5819,
"step": 305
},
{
"epoch": 0.31,
"learning_rate": 0.009746195431472081,
"loss": 1.3608,
"step": 310
},
{
"epoch": 0.31,
"learning_rate": 0.00971799492385787,
"loss": 1.4035,
"step": 315
},
{
"epoch": 0.32,
"learning_rate": 0.009689794416243654,
"loss": 1.7838,
"step": 320
},
{
"epoch": 0.32,
"learning_rate": 0.009661593908629442,
"loss": 1.462,
"step": 325
},
{
"epoch": 0.33,
"learning_rate": 0.009633393401015228,
"loss": 1.8537,
"step": 330
},
{
"epoch": 0.33,
"learning_rate": 0.009605192893401015,
"loss": 1.6035,
"step": 335
},
{
"epoch": 0.34,
"learning_rate": 0.009576992385786803,
"loss": 1.7534,
"step": 340
},
{
"epoch": 0.34,
"learning_rate": 0.00954879187817259,
"loss": 1.5117,
"step": 345
},
{
"epoch": 0.35,
"learning_rate": 0.009520591370558376,
"loss": 1.6606,
"step": 350
},
{
"epoch": 0.35,
"learning_rate": 0.009492390862944162,
"loss": 2.2899,
"step": 355
},
{
"epoch": 0.36,
"learning_rate": 0.009464190355329949,
"loss": 1.6254,
"step": 360
},
{
"epoch": 0.36,
"learning_rate": 0.009435989847715737,
"loss": 1.7699,
"step": 365
},
{
"epoch": 0.37,
"learning_rate": 0.009407789340101523,
"loss": 1.5836,
"step": 370
},
{
"epoch": 0.37,
"learning_rate": 0.00937958883248731,
"loss": 1.9447,
"step": 375
},
{
"epoch": 0.38,
"learning_rate": 0.009351388324873098,
"loss": 1.7334,
"step": 380
},
{
"epoch": 0.38,
"learning_rate": 0.009323187817258882,
"loss": 1.5246,
"step": 385
},
{
"epoch": 0.39,
"learning_rate": 0.00929498730964467,
"loss": 1.7258,
"step": 390
},
{
"epoch": 0.39,
"learning_rate": 0.009266786802030457,
"loss": 1.6757,
"step": 395
},
{
"epoch": 0.4,
"learning_rate": 0.009238586294416243,
"loss": 1.7912,
"step": 400
},
{
"epoch": 0.4,
"learning_rate": 0.009210385786802031,
"loss": 1.8435,
"step": 405
},
{
"epoch": 0.41,
"learning_rate": 0.009182185279187818,
"loss": 2.0267,
"step": 410
},
{
"epoch": 0.41,
"learning_rate": 0.009153984771573604,
"loss": 1.6059,
"step": 415
},
{
"epoch": 0.42,
"learning_rate": 0.00912578426395939,
"loss": 1.7753,
"step": 420
},
{
"epoch": 0.42,
"learning_rate": 0.009097583756345177,
"loss": 1.988,
"step": 425
},
{
"epoch": 0.43,
"learning_rate": 0.009069383248730965,
"loss": 1.5201,
"step": 430
},
{
"epoch": 0.43,
"learning_rate": 0.009041182741116751,
"loss": 1.397,
"step": 435
},
{
"epoch": 0.44,
"learning_rate": 0.009012982233502538,
"loss": 1.7266,
"step": 440
},
{
"epoch": 0.44,
"learning_rate": 0.008984781725888326,
"loss": 1.6747,
"step": 445
},
{
"epoch": 0.45,
"learning_rate": 0.008956581218274112,
"loss": 1.5811,
"step": 450
},
{
"epoch": 0.45,
"learning_rate": 0.008928380710659899,
"loss": 1.4325,
"step": 455
},
{
"epoch": 0.46,
"learning_rate": 0.008900180203045685,
"loss": 1.9487,
"step": 460
},
{
"epoch": 0.46,
"learning_rate": 0.008871979695431471,
"loss": 1.4969,
"step": 465
},
{
"epoch": 0.47,
"learning_rate": 0.00884377918781726,
"loss": 1.6207,
"step": 470
},
{
"epoch": 0.47,
"learning_rate": 0.008815578680203046,
"loss": 1.9026,
"step": 475
},
{
"epoch": 0.48,
"learning_rate": 0.008787378172588832,
"loss": 1.6797,
"step": 480
},
{
"epoch": 0.48,
"learning_rate": 0.00875917766497462,
"loss": 1.5079,
"step": 485
},
{
"epoch": 0.49,
"learning_rate": 0.008730977157360405,
"loss": 1.5912,
"step": 490
},
{
"epoch": 0.49,
"learning_rate": 0.008702776649746193,
"loss": 1.706,
"step": 495
},
{
"epoch": 0.5,
"learning_rate": 0.00867457614213198,
"loss": 1.6559,
"step": 500
},
{
"epoch": 0.5,
"learning_rate": 0.008646375634517766,
"loss": 1.6109,
"step": 505
},
{
"epoch": 0.51,
"learning_rate": 0.008618175126903554,
"loss": 1.9024,
"step": 510
},
{
"epoch": 0.51,
"learning_rate": 0.00858997461928934,
"loss": 1.4508,
"step": 515
},
{
"epoch": 0.52,
"learning_rate": 0.008561774111675127,
"loss": 1.7011,
"step": 520
},
{
"epoch": 0.52,
"learning_rate": 0.008533573604060913,
"loss": 1.4233,
"step": 525
},
{
"epoch": 0.53,
"learning_rate": 0.0085053730964467,
"loss": 1.5886,
"step": 530
},
{
"epoch": 0.53,
"learning_rate": 0.008477172588832488,
"loss": 1.6088,
"step": 535
},
{
"epoch": 0.54,
"learning_rate": 0.008448972081218274,
"loss": 2.3507,
"step": 540
},
{
"epoch": 0.54,
"learning_rate": 0.00842077157360406,
"loss": 1.457,
"step": 545
},
{
"epoch": 0.55,
"learning_rate": 0.008392571065989849,
"loss": 1.7145,
"step": 550
},
{
"epoch": 0.55,
"learning_rate": 0.008364370558375635,
"loss": 1.6431,
"step": 555
},
{
"epoch": 0.56,
"learning_rate": 0.008336170050761421,
"loss": 1.6156,
"step": 560
},
{
"epoch": 0.56,
"learning_rate": 0.008307969543147208,
"loss": 1.4988,
"step": 565
},
{
"epoch": 0.57,
"learning_rate": 0.008279769035532994,
"loss": 1.6655,
"step": 570
},
{
"epoch": 0.57,
"learning_rate": 0.008251568527918782,
"loss": 1.3964,
"step": 575
},
{
"epoch": 0.58,
"learning_rate": 0.008223368020304569,
"loss": 1.5442,
"step": 580
},
{
"epoch": 0.58,
"learning_rate": 0.008195167512690355,
"loss": 1.576,
"step": 585
},
{
"epoch": 0.59,
"learning_rate": 0.008166967005076143,
"loss": 1.6838,
"step": 590
},
{
"epoch": 0.59,
"learning_rate": 0.008138766497461928,
"loss": 1.5472,
"step": 595
},
{
"epoch": 0.6,
"learning_rate": 0.008110565989847716,
"loss": 1.6603,
"step": 600
},
{
"epoch": 0.6,
"learning_rate": 0.008082365482233502,
"loss": 1.8896,
"step": 605
},
{
"epoch": 0.61,
"learning_rate": 0.008054164974619289,
"loss": 1.3879,
"step": 610
},
{
"epoch": 0.61,
"learning_rate": 0.008025964467005077,
"loss": 1.7506,
"step": 615
},
{
"epoch": 0.62,
"learning_rate": 0.007997763959390863,
"loss": 1.6107,
"step": 620
},
{
"epoch": 0.62,
"learning_rate": 0.00796956345177665,
"loss": 1.6015,
"step": 625
},
{
"epoch": 0.63,
"learning_rate": 0.007941362944162436,
"loss": 1.4905,
"step": 630
},
{
"epoch": 0.63,
"learning_rate": 0.007913162436548223,
"loss": 1.7908,
"step": 635
},
{
"epoch": 0.64,
"learning_rate": 0.00788496192893401,
"loss": 1.5734,
"step": 640
},
{
"epoch": 0.64,
"learning_rate": 0.007856761421319797,
"loss": 1.4382,
"step": 645
},
{
"epoch": 0.65,
"learning_rate": 0.007828560913705583,
"loss": 1.5483,
"step": 650
},
{
"epoch": 0.65,
"learning_rate": 0.00780036040609137,
"loss": 1.4697,
"step": 655
},
{
"epoch": 0.66,
"learning_rate": 0.007772159898477159,
"loss": 1.5878,
"step": 660
},
{
"epoch": 0.66,
"learning_rate": 0.007743959390862944,
"loss": 1.9756,
"step": 665
},
{
"epoch": 0.67,
"learning_rate": 0.007715758883248732,
"loss": 1.6656,
"step": 670
},
{
"epoch": 0.67,
"learning_rate": 0.007687558375634519,
"loss": 1.6744,
"step": 675
},
{
"epoch": 0.68,
"learning_rate": 0.007659357868020306,
"loss": 1.762,
"step": 680
},
{
"epoch": 0.68,
"learning_rate": 0.0076311573604060925,
"loss": 1.6982,
"step": 685
},
{
"epoch": 0.69,
"learning_rate": 0.007602956852791878,
"loss": 1.4908,
"step": 690
},
{
"epoch": 0.69,
"learning_rate": 0.007574756345177666,
"loss": 1.6065,
"step": 695
},
{
"epoch": 0.7,
"learning_rate": 0.0075465558375634526,
"loss": 1.2293,
"step": 700
},
{
"epoch": 0.7,
"learning_rate": 0.007518355329949239,
"loss": 1.6052,
"step": 705
},
{
"epoch": 0.71,
"learning_rate": 0.007490154822335025,
"loss": 1.3118,
"step": 710
},
{
"epoch": 0.71,
"learning_rate": 0.0074619543147208135,
"loss": 1.1742,
"step": 715
},
{
"epoch": 0.72,
"learning_rate": 0.0074337538071066,
"loss": 1.632,
"step": 720
},
{
"epoch": 0.72,
"learning_rate": 0.007405553299492385,
"loss": 1.6181,
"step": 725
},
{
"epoch": 0.73,
"learning_rate": 0.007377352791878174,
"loss": 1.6726,
"step": 730
},
{
"epoch": 0.73,
"learning_rate": 0.00734915228426396,
"loss": 1.5759,
"step": 735
},
{
"epoch": 0.74,
"learning_rate": 0.007320951776649747,
"loss": 1.3197,
"step": 740
},
{
"epoch": 0.74,
"learning_rate": 0.0072927512690355335,
"loss": 1.8665,
"step": 745
},
{
"epoch": 0.75,
"learning_rate": 0.007264550761421322,
"loss": 1.1938,
"step": 750
},
{
"epoch": 0.75,
"learning_rate": 0.007236350253807108,
"loss": 1.2782,
"step": 755
},
{
"epoch": 0.76,
"learning_rate": 0.0072081497461928936,
"loss": 1.6276,
"step": 760
},
{
"epoch": 0.76,
"learning_rate": 0.007179949238578682,
"loss": 1.5382,
"step": 765
},
{
"epoch": 0.77,
"learning_rate": 0.007151748730964467,
"loss": 1.5528,
"step": 770
},
{
"epoch": 0.77,
"learning_rate": 0.0071235482233502545,
"loss": 1.663,
"step": 775
},
{
"epoch": 0.78,
"learning_rate": 0.007095347715736042,
"loss": 1.5098,
"step": 780
},
{
"epoch": 0.78,
"learning_rate": 0.007067147208121828,
"loss": 1.781,
"step": 785
},
{
"epoch": 0.79,
"learning_rate": 0.007038946700507615,
"loss": 1.3787,
"step": 790
},
{
"epoch": 0.79,
"learning_rate": 0.007010746192893401,
"loss": 2.1853,
"step": 795
},
{
"epoch": 0.8,
"learning_rate": 0.006982545685279189,
"loss": 1.6639,
"step": 800
},
{
"epoch": 0.8,
"learning_rate": 0.006954345177664975,
"loss": 1.7152,
"step": 805
},
{
"epoch": 0.81,
"learning_rate": 0.006926144670050761,
"loss": 1.8148,
"step": 810
},
{
"epoch": 0.81,
"learning_rate": 0.006897944162436549,
"loss": 1.6564,
"step": 815
},
{
"epoch": 0.82,
"learning_rate": 0.0068697436548223354,
"loss": 1.3046,
"step": 820
},
{
"epoch": 0.82,
"learning_rate": 0.006841543147208123,
"loss": 1.3462,
"step": 825
},
{
"epoch": 0.83,
"learning_rate": 0.006813342639593909,
"loss": 1.4969,
"step": 830
},
{
"epoch": 0.83,
"learning_rate": 0.006785142131979697,
"loss": 1.7367,
"step": 835
},
{
"epoch": 0.84,
"learning_rate": 0.006756941624365483,
"loss": 1.5385,
"step": 840
},
{
"epoch": 0.84,
"learning_rate": 0.00672874111675127,
"loss": 1.5918,
"step": 845
},
{
"epoch": 0.85,
"learning_rate": 0.006700540609137057,
"loss": 1.4092,
"step": 850
},
{
"epoch": 0.85,
"learning_rate": 0.006672340101522844,
"loss": 1.6609,
"step": 855
},
{
"epoch": 0.86,
"learning_rate": 0.006644139593908631,
"loss": 1.6529,
"step": 860
},
{
"epoch": 0.86,
"learning_rate": 0.006615939086294417,
"loss": 2.0543,
"step": 865
},
{
"epoch": 0.87,
"learning_rate": 0.0065877385786802046,
"loss": 1.7507,
"step": 870
},
{
"epoch": 0.87,
"learning_rate": 0.00655953807106599,
"loss": 1.3364,
"step": 875
},
{
"epoch": 0.88,
"learning_rate": 0.0065313375634517765,
"loss": 1.5436,
"step": 880
},
{
"epoch": 0.88,
"learning_rate": 0.006503137055837565,
"loss": 1.5808,
"step": 885
},
{
"epoch": 0.89,
"learning_rate": 0.00647493654822335,
"loss": 1.1435,
"step": 890
},
{
"epoch": 0.89,
"learning_rate": 0.006446736040609138,
"loss": 1.7857,
"step": 895
},
{
"epoch": 0.9,
"learning_rate": 0.006418535532994924,
"loss": 1.506,
"step": 900
},
{
"epoch": 0.9,
"learning_rate": 0.006390335025380713,
"loss": 1.5276,
"step": 905
},
{
"epoch": 0.91,
"learning_rate": 0.006362134517766498,
"loss": 1.7656,
"step": 910
},
{
"epoch": 0.91,
"learning_rate": 0.006333934010152285,
"loss": 1.2465,
"step": 915
},
{
"epoch": 0.92,
"learning_rate": 0.006305733502538073,
"loss": 1.1414,
"step": 920
},
{
"epoch": 0.92,
"learning_rate": 0.006277532994923858,
"loss": 1.7263,
"step": 925
},
{
"epoch": 0.93,
"learning_rate": 0.0062493324873096456,
"loss": 1.6158,
"step": 930
},
{
"epoch": 0.93,
"learning_rate": 0.006221131979695433,
"loss": 1.3896,
"step": 935
},
{
"epoch": 0.94,
"learning_rate": 0.00619293147208122,
"loss": 1.8046,
"step": 940
},
{
"epoch": 0.94,
"learning_rate": 0.006164730964467006,
"loss": 1.6806,
"step": 945
},
{
"epoch": 0.95,
"learning_rate": 0.006136530456852793,
"loss": 1.7003,
"step": 950
},
{
"epoch": 0.95,
"learning_rate": 0.006108329949238579,
"loss": 1.4235,
"step": 955
},
{
"epoch": 0.96,
"learning_rate": 0.006080129441624366,
"loss": 1.5528,
"step": 960
},
{
"epoch": 0.96,
"learning_rate": 0.006051928934010154,
"loss": 1.6251,
"step": 965
},
{
"epoch": 0.97,
"learning_rate": 0.006023728426395939,
"loss": 1.7944,
"step": 970
},
{
"epoch": 0.97,
"learning_rate": 0.0059955279187817265,
"loss": 1.6361,
"step": 975
},
{
"epoch": 0.98,
"learning_rate": 0.005967327411167512,
"loss": 1.6142,
"step": 980
},
{
"epoch": 0.98,
"learning_rate": 0.0059391269035533,
"loss": 1.6014,
"step": 985
},
{
"epoch": 0.99,
"learning_rate": 0.005910926395939088,
"loss": 1.5756,
"step": 990
},
{
"epoch": 0.99,
"learning_rate": 0.005882725888324872,
"loss": 1.2877,
"step": 995
},
{
"epoch": 1.0,
"learning_rate": 0.005854525380710661,
"loss": 1.6494,
"step": 1000
},
{
"epoch": 1.0,
"learning_rate": 0.005826324873096447,
"loss": 1.4704,
"step": 1005
},
{
"epoch": 1.01,
"learning_rate": 0.005798124365482234,
"loss": 1.3735,
"step": 1010
},
{
"epoch": 1.01,
"learning_rate": 0.005769923857868021,
"loss": 1.3836,
"step": 1015
},
{
"epoch": 1.02,
"learning_rate": 0.005741723350253808,
"loss": 1.4496,
"step": 1020
},
{
"epoch": 1.02,
"learning_rate": 0.005713522842639595,
"loss": 1.4613,
"step": 1025
},
{
"epoch": 1.03,
"learning_rate": 0.00568532233502538,
"loss": 1.4161,
"step": 1030
},
{
"epoch": 1.03,
"learning_rate": 0.0056571218274111676,
"loss": 1.592,
"step": 1035
},
{
"epoch": 1.04,
"learning_rate": 0.005628921319796955,
"loss": 1.585,
"step": 1040
},
{
"epoch": 1.04,
"learning_rate": 0.005600720812182741,
"loss": 1.2444,
"step": 1045
},
{
"epoch": 1.05,
"learning_rate": 0.005572520304568528,