sup-simcse-roberta-large / trainer_state.json
{
"best_metric": 0.8821608385370503,
"best_model_checkpoint": "result/roberta-large-bs512-lr1e-5",
"epoch": 3.0,
"global_step": 1617,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 9.938157081014226e-06,
"loss": 7.003,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 9.876314162028449e-06,
"loss": 6.8949,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 9.814471243042673e-06,
"loss": 5.0525,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 9.752628324056896e-06,
"loss": 2.595,
"step": 40
},
{
"epoch": 0.09,
"learning_rate": 9.690785405071121e-06,
"loss": 1.866,
"step": 50
},
{
"epoch": 0.11,
"learning_rate": 9.628942486085344e-06,
"loss": 1.5908,
"step": 60
},
{
"epoch": 0.13,
"learning_rate": 9.567099567099569e-06,
"loss": 1.399,
"step": 70
},
{
"epoch": 0.15,
"learning_rate": 9.505256648113792e-06,
"loss": 1.284,
"step": 80
},
{
"epoch": 0.17,
"learning_rate": 9.443413729128017e-06,
"loss": 1.2112,
"step": 90
},
{
"epoch": 0.19,
"learning_rate": 9.38157081014224e-06,
"loss": 1.1033,
"step": 100
},
{
"epoch": 0.2,
"learning_rate": 9.319727891156464e-06,
"loss": 1.1478,
"step": 110
},
{
"epoch": 0.22,
"learning_rate": 9.257884972170687e-06,
"loss": 1.1024,
"step": 120
},
{
"epoch": 0.23,
"eval_avg_sts": 0.8464629190472637,
"eval_sickr_spearman": 0.8379469933567055,
"eval_stsb_spearman": 0.8549788447378217,
"step": 125
},
{
"epoch": 0.24,
"learning_rate": 9.196042053184912e-06,
"loss": 1.057,
"step": 130
},
{
"epoch": 0.26,
"learning_rate": 9.134199134199135e-06,
"loss": 1.034,
"step": 140
},
{
"epoch": 0.28,
"learning_rate": 9.07235621521336e-06,
"loss": 0.9599,
"step": 150
},
{
"epoch": 0.3,
"learning_rate": 9.010513296227583e-06,
"loss": 0.9968,
"step": 160
},
{
"epoch": 0.32,
"learning_rate": 8.948670377241808e-06,
"loss": 0.9707,
"step": 170
},
{
"epoch": 0.33,
"learning_rate": 8.88682745825603e-06,
"loss": 0.9783,
"step": 180
},
{
"epoch": 0.35,
"learning_rate": 8.824984539270254e-06,
"loss": 0.9017,
"step": 190
},
{
"epoch": 0.37,
"learning_rate": 8.763141620284478e-06,
"loss": 0.9256,
"step": 200
},
{
"epoch": 0.39,
"learning_rate": 8.701298701298701e-06,
"loss": 0.9103,
"step": 210
},
{
"epoch": 0.41,
"learning_rate": 8.639455782312926e-06,
"loss": 0.9234,
"step": 220
},
{
"epoch": 0.43,
"learning_rate": 8.577612863327149e-06,
"loss": 0.8912,
"step": 230
},
{
"epoch": 0.45,
"learning_rate": 8.515769944341374e-06,
"loss": 0.9209,
"step": 240
},
{
"epoch": 0.46,
"learning_rate": 8.453927025355597e-06,
"loss": 0.9003,
"step": 250
},
{
"epoch": 0.46,
"eval_avg_sts": 0.8553756123241436,
"eval_sickr_spearman": 0.8419504817237752,
"eval_stsb_spearman": 0.868800742924512,
"step": 250
},
{
"epoch": 0.48,
"learning_rate": 8.392084106369822e-06,
"loss": 0.8702,
"step": 260
},
{
"epoch": 0.5,
"learning_rate": 8.330241187384045e-06,
"loss": 0.8455,
"step": 270
},
{
"epoch": 0.52,
"learning_rate": 8.26839826839827e-06,
"loss": 0.8675,
"step": 280
},
{
"epoch": 0.54,
"learning_rate": 8.206555349412492e-06,
"loss": 0.8445,
"step": 290
},
{
"epoch": 0.56,
"learning_rate": 8.144712430426717e-06,
"loss": 0.8564,
"step": 300
},
{
"epoch": 0.58,
"learning_rate": 8.08286951144094e-06,
"loss": 0.8662,
"step": 310
},
{
"epoch": 0.59,
"learning_rate": 8.021026592455165e-06,
"loss": 0.8236,
"step": 320
},
{
"epoch": 0.61,
"learning_rate": 7.959183673469388e-06,
"loss": 0.8335,
"step": 330
},
{
"epoch": 0.63,
"learning_rate": 7.897340754483613e-06,
"loss": 0.8348,
"step": 340
},
{
"epoch": 0.65,
"learning_rate": 7.835497835497836e-06,
"loss": 0.8421,
"step": 350
},
{
"epoch": 0.67,
"learning_rate": 7.77365491651206e-06,
"loss": 0.8758,
"step": 360
},
{
"epoch": 0.69,
"learning_rate": 7.711811997526283e-06,
"loss": 0.8321,
"step": 370
},
{
"epoch": 0.7,
"eval_avg_sts": 0.8587296735997011,
"eval_sickr_spearman": 0.8432452561252423,
"eval_stsb_spearman": 0.8742140910741599,
"step": 375
},
{
"epoch": 0.71,
"learning_rate": 7.649969078540508e-06,
"loss": 0.8205,
"step": 380
},
{
"epoch": 0.72,
"learning_rate": 7.588126159554732e-06,
"loss": 0.8042,
"step": 390
},
{
"epoch": 0.74,
"learning_rate": 7.526283240568956e-06,
"loss": 0.8103,
"step": 400
},
{
"epoch": 0.76,
"learning_rate": 7.46444032158318e-06,
"loss": 0.8534,
"step": 410
},
{
"epoch": 0.78,
"learning_rate": 7.402597402597404e-06,
"loss": 0.7816,
"step": 420
},
{
"epoch": 0.8,
"learning_rate": 7.3407544836116275e-06,
"loss": 0.8084,
"step": 430
},
{
"epoch": 0.82,
"learning_rate": 7.278911564625851e-06,
"loss": 0.8036,
"step": 440
},
{
"epoch": 0.83,
"learning_rate": 7.217068645640075e-06,
"loss": 0.7804,
"step": 450
},
{
"epoch": 0.85,
"learning_rate": 7.155225726654299e-06,
"loss": 0.7832,
"step": 460
},
{
"epoch": 0.87,
"learning_rate": 7.093382807668523e-06,
"loss": 0.7861,
"step": 470
},
{
"epoch": 0.89,
"learning_rate": 7.031539888682747e-06,
"loss": 0.7737,
"step": 480
},
{
"epoch": 0.91,
"learning_rate": 6.969696969696971e-06,
"loss": 0.7552,
"step": 490
},
{
"epoch": 0.93,
"learning_rate": 6.907854050711195e-06,
"loss": 0.7564,
"step": 500
},
{
"epoch": 0.93,
"eval_avg_sts": 0.8603591998171731,
"eval_sickr_spearman": 0.843610964931592,
"eval_stsb_spearman": 0.8771074347027543,
"step": 500
},
{
"epoch": 0.95,
"learning_rate": 6.8460111317254185e-06,
"loss": 0.7929,
"step": 510
},
{
"epoch": 0.96,
"learning_rate": 6.7841682127396415e-06,
"loss": 0.7598,
"step": 520
},
{
"epoch": 0.98,
"learning_rate": 6.722325293753865e-06,
"loss": 0.7595,
"step": 530
},
{
"epoch": 1.0,
"learning_rate": 6.660482374768089e-06,
"loss": 0.7499,
"step": 540
},
{
"epoch": 1.02,
"learning_rate": 6.598639455782313e-06,
"loss": 0.7149,
"step": 550
},
{
"epoch": 1.04,
"learning_rate": 6.536796536796537e-06,
"loss": 0.7249,
"step": 560
},
{
"epoch": 1.06,
"learning_rate": 6.474953617810761e-06,
"loss": 0.6999,
"step": 570
},
{
"epoch": 1.08,
"learning_rate": 6.413110698824985e-06,
"loss": 0.731,
"step": 580
},
{
"epoch": 1.09,
"learning_rate": 6.351267779839209e-06,
"loss": 0.7133,
"step": 590
},
{
"epoch": 1.11,
"learning_rate": 6.2894248608534325e-06,
"loss": 0.707,
"step": 600
},
{
"epoch": 1.13,
"learning_rate": 6.227581941867656e-06,
"loss": 0.7111,
"step": 610
},
{
"epoch": 1.15,
"learning_rate": 6.16573902288188e-06,
"loss": 0.7196,
"step": 620
},
{
"epoch": 1.16,
"eval_avg_sts": 0.8600562169011113,
"eval_sickr_spearman": 0.8425868457867287,
"eval_stsb_spearman": 0.877525588015494,
"step": 625
},
{
"epoch": 1.17,
"learning_rate": 6.103896103896104e-06,
"loss": 0.7315,
"step": 630
},
{
"epoch": 1.19,
"learning_rate": 6.042053184910328e-06,
"loss": 0.6952,
"step": 640
},
{
"epoch": 1.21,
"learning_rate": 5.980210265924552e-06,
"loss": 0.7086,
"step": 650
},
{
"epoch": 1.22,
"learning_rate": 5.918367346938776e-06,
"loss": 0.7067,
"step": 660
},
{
"epoch": 1.24,
"learning_rate": 5.856524427953e-06,
"loss": 0.7467,
"step": 670
},
{
"epoch": 1.26,
"learning_rate": 5.7946815089672236e-06,
"loss": 0.6976,
"step": 680
},
{
"epoch": 1.28,
"learning_rate": 5.7328385899814474e-06,
"loss": 0.7185,
"step": 690
},
{
"epoch": 1.3,
"learning_rate": 5.670995670995671e-06,
"loss": 0.7138,
"step": 700
},
{
"epoch": 1.32,
"learning_rate": 5.609152752009895e-06,
"loss": 0.7343,
"step": 710
},
{
"epoch": 1.34,
"learning_rate": 5.547309833024119e-06,
"loss": 0.6987,
"step": 720
},
{
"epoch": 1.35,
"learning_rate": 5.485466914038343e-06,
"loss": 0.7019,
"step": 730
},
{
"epoch": 1.37,
"learning_rate": 5.423623995052567e-06,
"loss": 0.6916,
"step": 740
},
{
"epoch": 1.39,
"learning_rate": 5.361781076066791e-06,
"loss": 0.6876,
"step": 750
},
{
"epoch": 1.39,
"eval_avg_sts": 0.8606504994760098,
"eval_sickr_spearman": 0.8403676167758821,
"eval_stsb_spearman": 0.8809333821761376,
"step": 750
},
{
"epoch": 1.41,
"learning_rate": 5.2999381570810146e-06,
"loss": 0.6952,
"step": 760
},
{
"epoch": 1.43,
"learning_rate": 5.2380952380952384e-06,
"loss": 0.7006,
"step": 770
},
{
"epoch": 1.45,
"learning_rate": 5.176252319109462e-06,
"loss": 0.6783,
"step": 780
},
{
"epoch": 1.47,
"learning_rate": 5.114409400123686e-06,
"loss": 0.67,
"step": 790
},
{
"epoch": 1.48,
"learning_rate": 5.05256648113791e-06,
"loss": 0.6965,
"step": 800
},
{
"epoch": 1.5,
"learning_rate": 4.990723562152134e-06,
"loss": 0.7184,
"step": 810
},
{
"epoch": 1.52,
"learning_rate": 4.928880643166358e-06,
"loss": 0.7028,
"step": 820
},
{
"epoch": 1.54,
"learning_rate": 4.867037724180582e-06,
"loss": 0.7018,
"step": 830
},
{
"epoch": 1.56,
"learning_rate": 4.805194805194806e-06,
"loss": 0.6755,
"step": 840
},
{
"epoch": 1.58,
"learning_rate": 4.7433518862090295e-06,
"loss": 0.7275,
"step": 850
},
{
"epoch": 1.6,
"learning_rate": 4.681508967223253e-06,
"loss": 0.7079,
"step": 860
},
{
"epoch": 1.61,
"learning_rate": 4.619666048237477e-06,
"loss": 0.6657,
"step": 870
},
{
"epoch": 1.62,
"eval_avg_sts": 0.8619941787427801,
"eval_sickr_spearman": 0.8436138467976785,
"eval_stsb_spearman": 0.8803745106878816,
"step": 875
},
{
"epoch": 1.63,
"learning_rate": 4.557823129251701e-06,
"loss": 0.6632,
"step": 880
},
{
"epoch": 1.65,
"learning_rate": 4.495980210265925e-06,
"loss": 0.7117,
"step": 890
},
{
"epoch": 1.67,
"learning_rate": 4.434137291280149e-06,
"loss": 0.6729,
"step": 900
},
{
"epoch": 1.69,
"learning_rate": 4.372294372294373e-06,
"loss": 0.6621,
"step": 910
},
{
"epoch": 1.71,
"learning_rate": 4.310451453308597e-06,
"loss": 0.7047,
"step": 920
},
{
"epoch": 1.73,
"learning_rate": 4.2486085343228205e-06,
"loss": 0.69,
"step": 930
},
{
"epoch": 1.74,
"learning_rate": 4.186765615337044e-06,
"loss": 0.6909,
"step": 940
},
{
"epoch": 1.76,
"learning_rate": 4.124922696351268e-06,
"loss": 0.6897,
"step": 950
},
{
"epoch": 1.78,
"learning_rate": 4.063079777365492e-06,
"loss": 0.7043,
"step": 960
},
{
"epoch": 1.8,
"learning_rate": 4.001236858379716e-06,
"loss": 0.6819,
"step": 970
},
{
"epoch": 1.82,
"learning_rate": 3.93939393939394e-06,
"loss": 0.6995,
"step": 980
},
{
"epoch": 1.84,
"learning_rate": 3.877551020408164e-06,
"loss": 0.6816,
"step": 990
},
{
"epoch": 1.86,
"learning_rate": 3.815708101422388e-06,
"loss": 0.7123,
"step": 1000
},
{
"epoch": 1.86,
"eval_avg_sts": 0.8625810724399183,
"eval_sickr_spearman": 0.8440728800341225,
"eval_stsb_spearman": 0.8810892648457143,
"step": 1000
},
{
"epoch": 1.87,
"learning_rate": 3.753865182436611e-06,
"loss": 0.6697,
"step": 1010
},
{
"epoch": 1.89,
"learning_rate": 3.6920222634508353e-06,
"loss": 0.6597,
"step": 1020
},
{
"epoch": 1.91,
"learning_rate": 3.6301793444650592e-06,
"loss": 0.6619,
"step": 1030
},
{
"epoch": 1.93,
"learning_rate": 3.568336425479283e-06,
"loss": 0.6794,
"step": 1040
},
{
"epoch": 1.95,
"learning_rate": 3.506493506493507e-06,
"loss": 0.6842,
"step": 1050
},
{
"epoch": 1.97,
"learning_rate": 3.444650587507731e-06,
"loss": 0.6537,
"step": 1060
},
{
"epoch": 1.99,
"learning_rate": 3.3828076685219547e-06,
"loss": 0.6512,
"step": 1070
},
{
"epoch": 2.0,
"learning_rate": 3.3209647495361786e-06,
"loss": 0.6451,
"step": 1080
},
{
"epoch": 2.02,
"learning_rate": 3.2591218305504025e-06,
"loss": 0.6514,
"step": 1090
},
{
"epoch": 2.04,
"learning_rate": 3.1972789115646264e-06,
"loss": 0.6599,
"step": 1100
},
{
"epoch": 2.06,
"learning_rate": 3.1354359925788502e-06,
"loss": 0.6226,
"step": 1110
},
{
"epoch": 2.08,
"learning_rate": 3.0735930735930737e-06,
"loss": 0.6623,
"step": 1120
},
{
"epoch": 2.09,
"eval_avg_sts": 0.8592687624968119,
"eval_sickr_spearman": 0.8376155307256811,
"eval_stsb_spearman": 0.8809219942679428,
"step": 1125
},
{
"epoch": 2.1,
"learning_rate": 3.0117501546072976e-06,
"loss": 0.6179,
"step": 1130
},
{
"epoch": 2.12,
"learning_rate": 2.9499072356215214e-06,
"loss": 0.6425,
"step": 1140
},
{
"epoch": 2.13,
"learning_rate": 2.8880643166357453e-06,
"loss": 0.6601,
"step": 1150
},
{
"epoch": 2.15,
"learning_rate": 2.826221397649969e-06,
"loss": 0.6514,
"step": 1160
},
{
"epoch": 2.17,
"learning_rate": 2.764378478664193e-06,
"loss": 0.624,
"step": 1170
},
{
"epoch": 2.19,
"learning_rate": 2.702535559678417e-06,
"loss": 0.6375,
"step": 1180
},
{
"epoch": 2.21,
"learning_rate": 2.640692640692641e-06,
"loss": 0.6219,
"step": 1190
},
{
"epoch": 2.23,
"learning_rate": 2.5788497217068647e-06,
"loss": 0.6488,
"step": 1200
},
{
"epoch": 2.24,
"learning_rate": 2.5170068027210886e-06,
"loss": 0.6602,
"step": 1210
},
{
"epoch": 2.26,
"learning_rate": 2.4551638837353125e-06,
"loss": 0.6447,
"step": 1220
},
{
"epoch": 2.28,
"learning_rate": 2.3933209647495363e-06,
"loss": 0.6357,
"step": 1230
},
{
"epoch": 2.3,
"learning_rate": 2.33147804576376e-06,
"loss": 0.6316,
"step": 1240
},
{
"epoch": 2.32,
"learning_rate": 2.269635126777984e-06,
"loss": 0.6483,
"step": 1250
},
{
"epoch": 2.32,
"eval_avg_sts": 0.8583167194233854,
"eval_sickr_spearman": 0.8363792582057655,
"eval_stsb_spearman": 0.8802541806410051,
"step": 1250
},
{
"epoch": 2.34,
"learning_rate": 2.207792207792208e-06,
"loss": 0.6601,
"step": 1260
},
{
"epoch": 2.36,
"learning_rate": 2.145949288806432e-06,
"loss": 0.6228,
"step": 1270
},
{
"epoch": 2.37,
"learning_rate": 2.0841063698206557e-06,
"loss": 0.6383,
"step": 1280
},
{
"epoch": 2.39,
"learning_rate": 2.0222634508348796e-06,
"loss": 0.6035,
"step": 1290
},
{
"epoch": 2.41,
"learning_rate": 1.9604205318491035e-06,
"loss": 0.661,
"step": 1300
},
{
"epoch": 2.43,
"learning_rate": 1.8985776128633273e-06,
"loss": 0.6491,
"step": 1310
},
{
"epoch": 2.45,
"learning_rate": 1.8367346938775512e-06,
"loss": 0.6468,
"step": 1320
},
{
"epoch": 2.47,
"learning_rate": 1.774891774891775e-06,
"loss": 0.6262,
"step": 1330
},
{
"epoch": 2.49,
"learning_rate": 1.7130488559059988e-06,
"loss": 0.6289,
"step": 1340
},
{
"epoch": 2.5,
"learning_rate": 1.6512059369202226e-06,
"loss": 0.6346,
"step": 1350
},
{
"epoch": 2.52,
"learning_rate": 1.5893630179344465e-06,
"loss": 0.6487,
"step": 1360
},
{
"epoch": 2.54,
"learning_rate": 1.5275200989486704e-06,
"loss": 0.6476,
"step": 1370
},
{
"epoch": 2.55,
"eval_avg_sts": 0.8599184442252716,
"eval_sickr_spearman": 0.8376760499134931,
"eval_stsb_spearman": 0.8821608385370503,
"step": 1375
},
{
"epoch": 2.56,
"learning_rate": 1.4656771799628945e-06,
"loss": 0.6393,
"step": 1380
},
{
"epoch": 2.58,
"learning_rate": 1.4038342609771183e-06,
"loss": 0.6365,
"step": 1390
},
{
"epoch": 2.6,
"learning_rate": 1.3419913419913422e-06,
"loss": 0.6207,
"step": 1400
},
{
"epoch": 2.62,
"learning_rate": 1.280148423005566e-06,
"loss": 0.629,
"step": 1410
},
{
"epoch": 2.63,
"learning_rate": 1.2183055040197898e-06,
"loss": 0.6302,
"step": 1420
},
{
"epoch": 2.65,
"learning_rate": 1.1564625850340136e-06,
"loss": 0.6238,
"step": 1430
},
{
"epoch": 2.67,
"learning_rate": 1.0946196660482377e-06,
"loss": 0.6491,
"step": 1440
},
{
"epoch": 2.69,
"learning_rate": 1.0327767470624614e-06,
"loss": 0.6376,
"step": 1450
},
{
"epoch": 2.71,
"learning_rate": 9.709338280766853e-07,
"loss": 0.6378,
"step": 1460
},
{
"epoch": 2.73,
"learning_rate": 9.090909090909091e-07,
"loss": 0.6472,
"step": 1470
},
{
"epoch": 2.75,
"learning_rate": 8.472479901051331e-07,
"loss": 0.6385,
"step": 1480
},
{
"epoch": 2.76,
"learning_rate": 7.854050711193569e-07,
"loss": 0.6159,
"step": 1490
},
{
"epoch": 2.78,
"learning_rate": 7.235621521335808e-07,
"loss": 0.614,
"step": 1500
},
{
"epoch": 2.78,
"eval_avg_sts": 0.8592210832827014,
"eval_sickr_spearman": 0.8370235474004564,
"eval_stsb_spearman": 0.8814186191649465,
"step": 1500
},
{
"epoch": 2.8,
"learning_rate": 6.617192331478047e-07,
"loss": 0.6368,
"step": 1510
},
{
"epoch": 2.82,
"learning_rate": 5.998763141620285e-07,
"loss": 0.6098,
"step": 1520
},
{
"epoch": 2.84,
"learning_rate": 5.380333951762524e-07,
"loss": 0.6527,
"step": 1530
},
{
"epoch": 2.86,
"learning_rate": 4.7619047619047623e-07,
"loss": 0.6211,
"step": 1540
},
{
"epoch": 2.88,
"learning_rate": 4.143475572047001e-07,
"loss": 0.6258,
"step": 1550
},
{
"epoch": 2.89,
"learning_rate": 3.5250463821892393e-07,
"loss": 0.607,
"step": 1560
},
{
"epoch": 2.91,
"learning_rate": 2.9066171923314786e-07,
"loss": 0.6303,
"step": 1570
},
{
"epoch": 2.93,
"learning_rate": 2.288188002473717e-07,
"loss": 0.6144,
"step": 1580
},
{
"epoch": 2.95,
"learning_rate": 1.6697588126159556e-07,
"loss": 0.6223,
"step": 1590
},
{
"epoch": 2.97,
"learning_rate": 1.0513296227581942e-07,
"loss": 0.6236,
"step": 1600
},
{
"epoch": 2.99,
"learning_rate": 4.3290043290043295e-08,
"loss": 0.6555,
"step": 1610
},
{
"epoch": 3.0,
"step": 1617,
"train_runtime": 2073.0062,
"train_samples_per_second": 0.78
}
],
"max_steps": 1617,
"num_train_epochs": 3,
"total_flos": 169738863744319488,
"trial_name": null,
"trial_params": null
}
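
The JSON above is the raw Hugging Face `Trainer` state for this checkpoint: training-loss records every 10 steps, evaluation records every 125 steps, and summary fields such as `best_metric` and `best_model_checkpoint`. As a minimal illustrative sketch (not part of the checkpoint itself), the snippet below shows one way to read the file and recover the logged curves; the field names come from the file above, while the local path `trainer_state.json` and the helper logic are assumptions.

```python
# Sketch: parse trainer_state.json and summarize the logged training/eval history.
# Assumes the file has been downloaded locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are training logs; entries with eval_* keys are
# evaluation logs (in this file, eval_avg_sts is the mean of the SICK-R and
# STS-B Spearman correlations).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_stsb_spearman" in e]

print("final logged train loss:", train_logs[-1]["loss"])

best = max(eval_logs, key=lambda e: e["eval_stsb_spearman"])
print("best eval_stsb_spearman:", best["eval_stsb_spearman"], "at step", best["step"])

# For this file, the maximum STS-B Spearman matches state["best_metric"]
# (~0.8822, reached at step 1375 of 1617).
assert abs(best["eval_stsb_spearman"] - state["best_metric"]) < 1e-9
```

Plotting `step` against `loss` for `train_logs`, or against `eval_stsb_spearman` for `eval_logs`, reproduces the learning curves implied by the log above.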