{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8665302683836803,
"eval_steps": 500,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6869,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 8.000000000000001e-06,
"loss": 0.8396,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 1.2e-05,
"loss": 0.7489,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.7252,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 0.6548,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 2.4e-05,
"loss": 0.8022,
"step": 6
},
{
"epoch": 0.01,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.6524,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.6981,
"step": 8
},
{
"epoch": 0.02,
"learning_rate": 3.6e-05,
"loss": 0.7488,
"step": 9
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 0.6368,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.6891,
"step": 11
},
{
"epoch": 0.02,
"learning_rate": 4.8e-05,
"loss": 0.7968,
"step": 12
},
{
"epoch": 0.03,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.6912,
"step": 13
},
{
"epoch": 0.03,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.8452,
"step": 14
},
{
"epoch": 0.03,
"learning_rate": 6e-05,
"loss": 0.6989,
"step": 15
},
{
"epoch": 0.03,
"learning_rate": 6.400000000000001e-05,
"loss": 0.6685,
"step": 16
},
{
"epoch": 0.03,
"learning_rate": 6.800000000000001e-05,
"loss": 0.5469,
"step": 17
},
{
"epoch": 0.03,
"learning_rate": 7.2e-05,
"loss": 0.7915,
"step": 18
},
{
"epoch": 0.04,
"learning_rate": 7.6e-05,
"loss": 0.7744,
"step": 19
},
{
"epoch": 0.04,
"learning_rate": 8e-05,
"loss": 0.6804,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 8.4e-05,
"loss": 0.7796,
"step": 21
},
{
"epoch": 0.04,
"learning_rate": 8.800000000000001e-05,
"loss": 0.706,
"step": 22
},
{
"epoch": 0.04,
"learning_rate": 9.200000000000001e-05,
"loss": 0.6798,
"step": 23
},
{
"epoch": 0.05,
"learning_rate": 9.6e-05,
"loss": 0.6333,
"step": 24
},
{
"epoch": 0.05,
"learning_rate": 0.0001,
"loss": 0.6012,
"step": 25
},
{
"epoch": 0.05,
"learning_rate": 0.00010400000000000001,
"loss": 0.52,
"step": 26
},
{
"epoch": 0.05,
"learning_rate": 0.00010800000000000001,
"loss": 0.6583,
"step": 27
},
{
"epoch": 0.05,
"learning_rate": 0.00011200000000000001,
"loss": 0.7354,
"step": 28
},
{
"epoch": 0.06,
"learning_rate": 0.000116,
"loss": 0.6296,
"step": 29
},
{
"epoch": 0.06,
"learning_rate": 0.00012,
"loss": 0.6352,
"step": 30
},
{
"epoch": 0.06,
"learning_rate": 0.000124,
"loss": 0.6007,
"step": 31
},
{
"epoch": 0.06,
"learning_rate": 0.00012800000000000002,
"loss": 0.5659,
"step": 32
},
{
"epoch": 0.06,
"learning_rate": 0.000132,
"loss": 0.5138,
"step": 33
},
{
"epoch": 0.07,
"learning_rate": 0.00013600000000000003,
"loss": 0.6639,
"step": 34
},
{
"epoch": 0.07,
"learning_rate": 0.00014,
"loss": 0.5934,
"step": 35
},
{
"epoch": 0.07,
"learning_rate": 0.000144,
"loss": 0.5233,
"step": 36
},
{
"epoch": 0.07,
"learning_rate": 0.000148,
"loss": 0.5307,
"step": 37
},
{
"epoch": 0.07,
"learning_rate": 0.000152,
"loss": 0.5928,
"step": 38
},
{
"epoch": 0.08,
"learning_rate": 0.00015600000000000002,
"loss": 0.5908,
"step": 39
},
{
"epoch": 0.08,
"learning_rate": 0.00016,
"loss": 0.6366,
"step": 40
},
{
"epoch": 0.08,
"learning_rate": 0.000164,
"loss": 0.5972,
"step": 41
},
{
"epoch": 0.08,
"learning_rate": 0.000168,
"loss": 0.4825,
"step": 42
},
{
"epoch": 0.08,
"learning_rate": 0.000172,
"loss": 0.6783,
"step": 43
},
{
"epoch": 0.08,
"learning_rate": 0.00017600000000000002,
"loss": 0.6082,
"step": 44
},
{
"epoch": 0.09,
"learning_rate": 0.00018,
"loss": 0.7633,
"step": 45
},
{
"epoch": 0.09,
"learning_rate": 0.00018400000000000003,
"loss": 0.5988,
"step": 46
},
{
"epoch": 0.09,
"learning_rate": 0.000188,
"loss": 0.6658,
"step": 47
},
{
"epoch": 0.09,
"learning_rate": 0.000192,
"loss": 0.5945,
"step": 48
},
{
"epoch": 0.09,
"learning_rate": 0.000196,
"loss": 0.5984,
"step": 49
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.6778,
"step": 50
},
{
"epoch": 0.1,
"learning_rate": 0.00020400000000000003,
"loss": 0.6057,
"step": 51
},
{
"epoch": 0.1,
"learning_rate": 0.00020800000000000001,
"loss": 0.601,
"step": 52
},
{
"epoch": 0.1,
"learning_rate": 0.00021200000000000003,
"loss": 0.5566,
"step": 53
},
{
"epoch": 0.1,
"learning_rate": 0.00021600000000000002,
"loss": 0.5911,
"step": 54
},
{
"epoch": 0.11,
"learning_rate": 0.00022000000000000003,
"loss": 0.7636,
"step": 55
},
{
"epoch": 0.11,
"learning_rate": 0.00022400000000000002,
"loss": 0.5537,
"step": 56
},
{
"epoch": 0.11,
"learning_rate": 0.00022799999999999999,
"loss": 0.6037,
"step": 57
},
{
"epoch": 0.11,
"learning_rate": 0.000232,
"loss": 0.6474,
"step": 58
},
{
"epoch": 0.11,
"learning_rate": 0.000236,
"loss": 0.6483,
"step": 59
},
{
"epoch": 0.12,
"learning_rate": 0.00024,
"loss": 0.5021,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 0.000244,
"loss": 0.5347,
"step": 61
},
{
"epoch": 0.12,
"learning_rate": 0.000248,
"loss": 0.5791,
"step": 62
},
{
"epoch": 0.12,
"learning_rate": 0.000252,
"loss": 0.5407,
"step": 63
},
{
"epoch": 0.12,
"learning_rate": 0.00025600000000000004,
"loss": 0.5298,
"step": 64
},
{
"epoch": 0.13,
"learning_rate": 0.00026000000000000003,
"loss": 0.5685,
"step": 65
},
{
"epoch": 0.13,
"learning_rate": 0.000264,
"loss": 0.5108,
"step": 66
},
{
"epoch": 0.13,
"learning_rate": 0.000268,
"loss": 0.526,
"step": 67
},
{
"epoch": 0.13,
"learning_rate": 0.00027200000000000005,
"loss": 0.6843,
"step": 68
},
{
"epoch": 0.13,
"learning_rate": 0.000276,
"loss": 0.6608,
"step": 69
},
{
"epoch": 0.13,
"learning_rate": 0.00028,
"loss": 0.5866,
"step": 70
},
{
"epoch": 0.14,
"learning_rate": 0.000284,
"loss": 0.6422,
"step": 71
},
{
"epoch": 0.14,
"learning_rate": 0.000288,
"loss": 0.449,
"step": 72
},
{
"epoch": 0.14,
"learning_rate": 0.000292,
"loss": 0.5319,
"step": 73
},
{
"epoch": 0.14,
"learning_rate": 0.000296,
"loss": 0.5977,
"step": 74
},
{
"epoch": 0.14,
"learning_rate": 0.00030000000000000003,
"loss": 0.5805,
"step": 75
},
{
"epoch": 0.15,
"learning_rate": 0.000304,
"loss": 0.5209,
"step": 76
},
{
"epoch": 0.15,
"learning_rate": 0.000308,
"loss": 0.6098,
"step": 77
},
{
"epoch": 0.15,
"learning_rate": 0.00031200000000000005,
"loss": 0.4665,
"step": 78
},
{
"epoch": 0.15,
"learning_rate": 0.00031600000000000004,
"loss": 0.6882,
"step": 79
},
{
"epoch": 0.15,
"learning_rate": 0.00032,
"loss": 0.5427,
"step": 80
},
{
"epoch": 0.16,
"learning_rate": 0.000324,
"loss": 0.5345,
"step": 81
},
{
"epoch": 0.16,
"learning_rate": 0.000328,
"loss": 0.663,
"step": 82
},
{
"epoch": 0.16,
"learning_rate": 0.000332,
"loss": 0.5393,
"step": 83
},
{
"epoch": 0.16,
"learning_rate": 0.000336,
"loss": 0.5711,
"step": 84
},
{
"epoch": 0.16,
"learning_rate": 0.00034,
"loss": 0.5261,
"step": 85
},
{
"epoch": 0.17,
"learning_rate": 0.000344,
"loss": 0.5775,
"step": 86
},
{
"epoch": 0.17,
"learning_rate": 0.000348,
"loss": 0.6329,
"step": 87
},
{
"epoch": 0.17,
"learning_rate": 0.00035200000000000005,
"loss": 0.4425,
"step": 88
},
{
"epoch": 0.17,
"learning_rate": 0.00035600000000000003,
"loss": 0.6837,
"step": 89
},
{
"epoch": 0.17,
"learning_rate": 0.00036,
"loss": 0.615,
"step": 90
},
{
"epoch": 0.18,
"learning_rate": 0.000364,
"loss": 0.5615,
"step": 91
},
{
"epoch": 0.18,
"learning_rate": 0.00036800000000000005,
"loss": 0.5434,
"step": 92
},
{
"epoch": 0.18,
"learning_rate": 0.00037200000000000004,
"loss": 0.5864,
"step": 93
},
{
"epoch": 0.18,
"learning_rate": 0.000376,
"loss": 0.5583,
"step": 94
},
{
"epoch": 0.18,
"learning_rate": 0.00038,
"loss": 0.5299,
"step": 95
},
{
"epoch": 0.18,
"learning_rate": 0.000384,
"loss": 0.532,
"step": 96
},
{
"epoch": 0.19,
"learning_rate": 0.000388,
"loss": 0.5227,
"step": 97
},
{
"epoch": 0.19,
"learning_rate": 0.000392,
"loss": 0.5275,
"step": 98
},
{
"epoch": 0.19,
"learning_rate": 0.00039600000000000003,
"loss": 0.4541,
"step": 99
},
{
"epoch": 0.19,
"learning_rate": 0.0004,
"loss": 0.6485,
"step": 100
},
{
"epoch": 0.19,
"learning_rate": 0.0003999995350775973,
"loss": 0.5438,
"step": 101
},
{
"epoch": 0.2,
"learning_rate": 0.00039999814031255063,
"loss": 0.5997,
"step": 102
},
{
"epoch": 0.2,
"learning_rate": 0.00039999581571134455,
"loss": 0.5322,
"step": 103
},
{
"epoch": 0.2,
"learning_rate": 0.0003999925612847867,
"loss": 0.484,
"step": 104
},
{
"epoch": 0.2,
"learning_rate": 0.00039998837704800766,
"loss": 0.5961,
"step": 105
},
{
"epoch": 0.2,
"learning_rate": 0.00039998326302046085,
"loss": 0.7405,
"step": 106
},
{
"epoch": 0.21,
"learning_rate": 0.00039997721922592255,
"loss": 0.5802,
"step": 107
},
{
"epoch": 0.21,
"learning_rate": 0.00039997024569249167,
"loss": 0.769,
"step": 108
},
{
"epoch": 0.21,
"learning_rate": 0.0003999623424525898,
"loss": 0.5598,
"step": 109
},
{
"epoch": 0.21,
"learning_rate": 0.0003999535095429608,
"loss": 0.6143,
"step": 110
},
{
"epoch": 0.21,
"learning_rate": 0.00039994374700467095,
"loss": 0.5766,
"step": 111
},
{
"epoch": 0.22,
"learning_rate": 0.00039993305488310836,
"loss": 0.7695,
"step": 112
},
{
"epoch": 0.22,
"learning_rate": 0.0003999214332279831,
"loss": 0.7153,
"step": 113
},
{
"epoch": 0.22,
"learning_rate": 0.0003999088820933269,
"loss": 0.5835,
"step": 114
},
{
"epoch": 0.22,
"learning_rate": 0.00039989540153749286,
"loss": 0.6634,
"step": 115
},
{
"epoch": 0.22,
"learning_rate": 0.000399880991623155,
"loss": 0.6069,
"step": 116
},
{
"epoch": 0.23,
"learning_rate": 0.0003998656524173082,
"loss": 0.7224,
"step": 117
},
{
"epoch": 0.23,
"learning_rate": 0.000399849383991268,
"loss": 0.5884,
"step": 118
},
{
"epoch": 0.23,
"learning_rate": 0.0003998321864206699,
"loss": 0.5122,
"step": 119
},
{
"epoch": 0.23,
"learning_rate": 0.00039981405978546924,
"loss": 0.6453,
"step": 120
},
{
"epoch": 0.23,
"learning_rate": 0.0003997950041699408,
"loss": 0.4665,
"step": 121
},
{
"epoch": 0.23,
"learning_rate": 0.0003997750196626785,
"loss": 0.5428,
"step": 122
},
{
"epoch": 0.24,
"learning_rate": 0.00039975410635659464,
"loss": 0.4365,
"step": 123
},
{
"epoch": 0.24,
"learning_rate": 0.00039973226434891995,
"loss": 0.5978,
"step": 124
},
{
"epoch": 0.24,
"learning_rate": 0.00039970949374120286,
"loss": 0.7729,
"step": 125
},
{
"epoch": 0.24,
"learning_rate": 0.000399685794639309,
"loss": 0.6212,
"step": 126
},
{
"epoch": 0.24,
"learning_rate": 0.00039966116715342066,
"loss": 0.5426,
"step": 127
},
{
"epoch": 0.25,
"learning_rate": 0.00039963561139803676,
"loss": 0.5782,
"step": 128
},
{
"epoch": 0.25,
"learning_rate": 0.0003996091274919716,
"loss": 0.6701,
"step": 129
},
{
"epoch": 0.25,
"learning_rate": 0.0003995817155583548,
"loss": 0.6314,
"step": 130
},
{
"epoch": 0.25,
"learning_rate": 0.0003995533757246307,
"loss": 0.6662,
"step": 131
},
{
"epoch": 0.25,
"learning_rate": 0.0003995241081225573,
"loss": 0.5192,
"step": 132
},
{
"epoch": 0.26,
"learning_rate": 0.0003994939128882065,
"loss": 0.5591,
"step": 133
},
{
"epoch": 0.26,
"learning_rate": 0.0003994627901619625,
"loss": 0.5809,
"step": 134
},
{
"epoch": 0.26,
"learning_rate": 0.0003994307400885219,
"loss": 0.4871,
"step": 135
},
{
"epoch": 0.26,
"learning_rate": 0.0003993977628168928,
"loss": 0.6666,
"step": 136
},
{
"epoch": 0.26,
"learning_rate": 0.0003993638585003938,
"loss": 0.6469,
"step": 137
},
{
"epoch": 0.27,
"learning_rate": 0.00039932902729665357,
"loss": 0.5727,
"step": 138
},
{
"epoch": 0.27,
"learning_rate": 0.00039929326936761036,
"loss": 0.6715,
"step": 139
},
{
"epoch": 0.27,
"learning_rate": 0.00039925658487951067,
"loss": 0.5686,
"step": 140
},
{
"epoch": 0.27,
"learning_rate": 0.00039921897400290894,
"loss": 0.501,
"step": 141
},
{
"epoch": 0.27,
"learning_rate": 0.00039918043691266665,
"loss": 0.5795,
"step": 142
},
{
"epoch": 0.28,
"learning_rate": 0.00039914097378795124,
"loss": 0.6287,
"step": 143
},
{
"epoch": 0.28,
"learning_rate": 0.00039910058481223564,
"loss": 0.7016,
"step": 144
},
{
"epoch": 0.28,
"learning_rate": 0.00039905927017329726,
"loss": 0.6232,
"step": 145
},
{
"epoch": 0.28,
"learning_rate": 0.00039901703006321715,
"loss": 0.5291,
"step": 146
},
{
"epoch": 0.28,
"learning_rate": 0.00039897386467837903,
"loss": 0.5297,
"step": 147
},
{
"epoch": 0.28,
"learning_rate": 0.00039892977421946844,
"loss": 0.5784,
"step": 148
},
{
"epoch": 0.29,
"learning_rate": 0.0003988847588914718,
"loss": 0.5714,
"step": 149
},
{
"epoch": 0.29,
"learning_rate": 0.0003988388189036754,
"loss": 0.5044,
"step": 150
},
{
"epoch": 0.29,
"learning_rate": 0.0003987919544696646,
"loss": 0.8246,
"step": 151
},
{
"epoch": 0.29,
"learning_rate": 0.0003987441658073226,
"loss": 0.5048,
"step": 152
},
{
"epoch": 0.29,
"learning_rate": 0.0003986954531388297,
"loss": 0.5433,
"step": 153
},
{
"epoch": 0.3,
"learning_rate": 0.00039864581669066186,
"loss": 0.5251,
"step": 154
},
{
"epoch": 0.3,
"learning_rate": 0.0003985952566935902,
"loss": 0.5708,
"step": 155
},
{
"epoch": 0.3,
"learning_rate": 0.00039854377338267936,
"loss": 0.6276,
"step": 156
},
{
"epoch": 0.3,
"learning_rate": 0.00039849136699728684,
"loss": 0.4915,
"step": 157
},
{
"epoch": 0.3,
"learning_rate": 0.0003984380377810617,
"loss": 0.6389,
"step": 158
},
{
"epoch": 0.31,
"learning_rate": 0.00039838378598194325,
"loss": 0.6067,
"step": 159
},
{
"epoch": 0.31,
"learning_rate": 0.00039832861185216045,
"loss": 0.6136,
"step": 160
},
{
"epoch": 0.31,
"learning_rate": 0.0003982725156482301,
"loss": 0.5597,
"step": 161
},
{
"epoch": 0.31,
"learning_rate": 0.000398215497630956,
"loss": 0.5957,
"step": 162
},
{
"epoch": 0.31,
"learning_rate": 0.0003981575580654278,
"loss": 0.5853,
"step": 163
},
{
"epoch": 0.32,
"learning_rate": 0.0003980986972210194,
"loss": 0.5462,
"step": 164
},
{
"epoch": 0.32,
"learning_rate": 0.0003980389153713881,
"loss": 0.5302,
"step": 165
},
{
"epoch": 0.32,
"learning_rate": 0.00039797821279447307,
"loss": 0.5395,
"step": 166
},
{
"epoch": 0.32,
"learning_rate": 0.00039791658977249425,
"loss": 0.7004,
"step": 167
},
{
"epoch": 0.32,
"learning_rate": 0.00039785404659195084,
"loss": 0.5622,
"step": 168
},
{
"epoch": 0.33,
"learning_rate": 0.00039779058354362013,
"loss": 0.5759,
"step": 169
},
{
"epoch": 0.33,
"learning_rate": 0.000397726200922556,
"loss": 0.6184,
"step": 170
},
{
"epoch": 0.33,
"learning_rate": 0.0003976608990280877,
"loss": 0.5488,
"step": 171
},
{
"epoch": 0.33,
"learning_rate": 0.0003975946781638183,
"loss": 0.6162,
"step": 172
},
{
"epoch": 0.33,
"learning_rate": 0.0003975275386376236,
"loss": 0.558,
"step": 173
},
{
"epoch": 0.34,
"learning_rate": 0.0003974594807616502,
"loss": 0.519,
"step": 174
},
{
"epoch": 0.34,
"learning_rate": 0.0003973905048523144,
"loss": 0.6195,
"step": 175
},
{
"epoch": 0.34,
"learning_rate": 0.00039732061123030064,
"loss": 0.5991,
"step": 176
},
{
"epoch": 0.34,
"learning_rate": 0.0003972498002205601,
"loss": 0.5428,
"step": 177
},
{
"epoch": 0.34,
"learning_rate": 0.00039717807215230896,
"loss": 0.5323,
"step": 178
},
{
"epoch": 0.34,
"learning_rate": 0.00039710542735902705,
"loss": 0.5307,
"step": 179
},
{
"epoch": 0.35,
"learning_rate": 0.0003970318661784564,
"loss": 0.5783,
"step": 180
},
{
"epoch": 0.35,
"learning_rate": 0.0003969573889525993,
"loss": 0.5924,
"step": 181
},
{
"epoch": 0.35,
"learning_rate": 0.00039688199602771714,
"loss": 0.5902,
"step": 182
},
{
"epoch": 0.35,
"learning_rate": 0.00039680568775432855,
"loss": 0.6291,
"step": 183
},
{
"epoch": 0.35,
"learning_rate": 0.0003967284644872077,
"loss": 0.5942,
"step": 184
},
{
"epoch": 0.36,
"learning_rate": 0.0003966503265853829,
"loss": 0.4878,
"step": 185
},
{
"epoch": 0.36,
"learning_rate": 0.0003965712744121347,
"loss": 0.6487,
"step": 186
},
{
"epoch": 0.36,
"learning_rate": 0.0003964913083349945,
"loss": 0.6111,
"step": 187
},
{
"epoch": 0.36,
"learning_rate": 0.00039641042872574233,
"loss": 0.6072,
"step": 188
},
{
"epoch": 0.36,
"learning_rate": 0.00039632863596040575,
"loss": 0.716,
"step": 189
},
{
"epoch": 0.37,
"learning_rate": 0.00039624593041925763,
"loss": 0.6178,
"step": 190
},
{
"epoch": 0.37,
"learning_rate": 0.0003961623124868145,
"loss": 0.6323,
"step": 191
},
{
"epoch": 0.37,
"learning_rate": 0.00039607778255183485,
"loss": 0.5821,
"step": 192
},
{
"epoch": 0.37,
"learning_rate": 0.0003959923410073174,
"loss": 0.6738,
"step": 193
},
{
"epoch": 0.37,
"learning_rate": 0.0003959059882504989,
"loss": 0.6203,
"step": 194
},
{
"epoch": 0.38,
"learning_rate": 0.00039581872468285277,
"loss": 0.632,
"step": 195
},
{
"epoch": 0.38,
"learning_rate": 0.0003957305507100868,
"loss": 0.5857,
"step": 196
},
{
"epoch": 0.38,
"learning_rate": 0.00039564146674214164,
"loss": 0.6311,
"step": 197
},
{
"epoch": 0.38,
"learning_rate": 0.0003955514731931885,
"loss": 0.5889,
"step": 198
},
{
"epoch": 0.38,
"learning_rate": 0.00039546057048162763,
"loss": 0.5201,
"step": 199
},
{
"epoch": 0.39,
"learning_rate": 0.00039536875903008607,
"loss": 0.5581,
"step": 200
},
{
"epoch": 0.39,
"learning_rate": 0.00039527603926541586,
"loss": 0.5104,
"step": 201
},
{
"epoch": 0.39,
"learning_rate": 0.00039518241161869193,
"loss": 0.5978,
"step": 202
},
{
"epoch": 0.39,
"learning_rate": 0.00039508787652521013,
"loss": 0.6244,
"step": 203
},
{
"epoch": 0.39,
"learning_rate": 0.00039499243442448536,
"loss": 0.589,
"step": 204
},
{
"epoch": 0.39,
"learning_rate": 0.0003948960857602493,
"loss": 0.575,
"step": 205
},
{
"epoch": 0.4,
"learning_rate": 0.0003947988309804485,
"loss": 0.5494,
"step": 206
},
{
"epoch": 0.4,
"learning_rate": 0.0003947006705372422,
"loss": 0.4895,
"step": 207
},
{
"epoch": 0.4,
"learning_rate": 0.00039460160488700036,
"loss": 0.5479,
"step": 208
},
{
"epoch": 0.4,
"learning_rate": 0.00039450163449030124,
"loss": 0.5893,
"step": 209
},
{
"epoch": 0.4,
"learning_rate": 0.0003944007598119297,
"loss": 0.5451,
"step": 210
},
{
"epoch": 0.41,
"learning_rate": 0.0003942989813208747,
"loss": 0.5582,
"step": 211
},
{
"epoch": 0.41,
"learning_rate": 0.0003941962994903273,
"loss": 0.5121,
"step": 212
},
{
"epoch": 0.41,
"learning_rate": 0.00039409271479767826,
"loss": 0.6324,
"step": 213
},
{
"epoch": 0.41,
"learning_rate": 0.000393988227724516,
"loss": 0.6118,
"step": 214
},
{
"epoch": 0.41,
"learning_rate": 0.0003938828387566244,
"loss": 0.6303,
"step": 215
},
{
"epoch": 0.42,
"learning_rate": 0.0003937765483839804,
"loss": 0.7705,
"step": 216
},
{
"epoch": 0.42,
"learning_rate": 0.0003936693571007517,
"loss": 0.6224,
"step": 217
},
{
"epoch": 0.42,
"learning_rate": 0.0003935612654052946,
"loss": 0.5664,
"step": 218
},
{
"epoch": 0.42,
"learning_rate": 0.00039345227380015163,
"loss": 0.66,
"step": 219
},
{
"epoch": 0.42,
"learning_rate": 0.00039334238279204906,
"loss": 0.5582,
"step": 220
},
{
"epoch": 0.43,
"learning_rate": 0.00039323159289189505,
"loss": 0.6087,
"step": 221
},
{
"epoch": 0.43,
"learning_rate": 0.0003931199046147764,
"loss": 0.5566,
"step": 222
},
{
"epoch": 0.43,
"learning_rate": 0.00039300731847995716,
"loss": 0.5775,
"step": 223
},
{
"epoch": 0.43,
"learning_rate": 0.00039289383501087534,
"loss": 0.5081,
"step": 224
},
{
"epoch": 0.43,
"learning_rate": 0.00039277945473514104,
"loss": 0.5218,
"step": 225
},
{
"epoch": 0.44,
"learning_rate": 0.0003926641781845338,
"loss": 0.6655,
"step": 226
},
{
"epoch": 0.44,
"learning_rate": 0.0003925480058950002,
"loss": 0.5735,
"step": 227
},
{
"epoch": 0.44,
"learning_rate": 0.00039243093840665114,
"loss": 0.6609,
"step": 228
},
{
"epoch": 0.44,
"learning_rate": 0.0003923129762637596,
"loss": 0.7323,
"step": 229
},
{
"epoch": 0.44,
"learning_rate": 0.000392194120014758,
"loss": 0.5703,
"step": 230
},
{
"epoch": 0.44,
"learning_rate": 0.00039207437021223583,
"loss": 0.6545,
"step": 231
},
{
"epoch": 0.45,
"learning_rate": 0.0003919537274129366,
"loss": 0.521,
"step": 232
},
{
"epoch": 0.45,
"learning_rate": 0.00039183219217775564,
"loss": 0.5257,
"step": 233
},
{
"epoch": 0.45,
"learning_rate": 0.0003917097650717377,
"loss": 0.5487,
"step": 234
},
{
"epoch": 0.45,
"learning_rate": 0.00039158644666407365,
"loss": 0.4861,
"step": 235
},
{
"epoch": 0.45,
"learning_rate": 0.00039146223752809845,
"loss": 0.4928,
"step": 236
},
{
"epoch": 0.46,
"learning_rate": 0.0003913371382412883,
"loss": 0.5253,
"step": 237
},
{
"epoch": 0.46,
"learning_rate": 0.00039121114938525756,
"loss": 0.6155,
"step": 238
},
{
"epoch": 0.46,
"learning_rate": 0.00039108427154575684,
"loss": 0.55,
"step": 239
},
{
"epoch": 0.46,
"learning_rate": 0.00039095650531266967,
"loss": 0.6617,
"step": 240
},
{
"epoch": 0.46,
"learning_rate": 0.00039082785128000976,
"loss": 0.5198,
"step": 241
},
{
"epoch": 0.47,
"learning_rate": 0.00039069831004591866,
"loss": 0.5302,
"step": 242
},
{
"epoch": 0.47,
"learning_rate": 0.0003905678822126625,
"loss": 0.5347,
"step": 243
},
{
"epoch": 0.47,
"learning_rate": 0.00039043656838662946,
"loss": 0.531,
"step": 244
},
{
"epoch": 0.47,
"learning_rate": 0.00039030436917832697,
"loss": 0.4884,
"step": 245
},
{
"epoch": 0.47,
"learning_rate": 0.00039017128520237883,
"loss": 0.6027,
"step": 246
},
{
"epoch": 0.48,
"learning_rate": 0.0003900373170775222,
"loss": 0.5537,
"step": 247
},
{
"epoch": 0.48,
"learning_rate": 0.00038990246542660494,
"loss": 0.5753,
"step": 248
},
{
"epoch": 0.48,
"learning_rate": 0.00038976673087658256,
"loss": 0.5059,
"step": 249
},
{
"epoch": 0.48,
"learning_rate": 0.00038963011405851537,
"loss": 0.5118,
"step": 250
},
{
"epoch": 0.48,
"learning_rate": 0.00038949261560756565,
"loss": 0.5645,
"step": 251
},
{
"epoch": 0.49,
"learning_rate": 0.0003893542361629944,
"loss": 0.5623,
"step": 252
},
{
"epoch": 0.49,
"learning_rate": 0.00038921497636815866,
"loss": 0.5216,
"step": 253
},
{
"epoch": 0.49,
"learning_rate": 0.0003890748368705085,
"loss": 0.4501,
"step": 254
},
{
"epoch": 0.49,
"learning_rate": 0.0003889338183215838,
"loss": 0.48,
"step": 255
},
{
"epoch": 0.49,
"learning_rate": 0.00038879192137701135,
"loss": 0.5218,
"step": 256
},
{
"epoch": 0.49,
"learning_rate": 0.0003886491466965018,
"loss": 0.5858,
"step": 257
},
{
"epoch": 0.5,
"learning_rate": 0.00038850549494384685,
"loss": 0.6124,
"step": 258
},
{
"epoch": 0.5,
"learning_rate": 0.00038836096678691536,
"loss": 0.4645,
"step": 259
},
{
"epoch": 0.5,
"learning_rate": 0.00038821556289765136,
"loss": 0.474,
"step": 260
},
{
"epoch": 0.5,
"learning_rate": 0.00038806928395207003,
"loss": 0.4364,
"step": 261
},
{
"epoch": 0.5,
"learning_rate": 0.00038792213063025484,
"loss": 0.5821,
"step": 262
},
{
"epoch": 0.51,
"learning_rate": 0.0003877741036163547,
"loss": 0.5393,
"step": 263
},
{
"epoch": 0.51,
"learning_rate": 0.0003876252035985804,
"loss": 0.5373,
"step": 264
},
{
"epoch": 0.51,
"learning_rate": 0.0003874754312692013,
"loss": 0.6021,
"step": 265
},
{
"epoch": 0.51,
"learning_rate": 0.0003873247873245426,
"loss": 0.4549,
"step": 266
},
{
"epoch": 0.51,
"learning_rate": 0.0003871732724649817,
"loss": 0.5994,
"step": 267
},
{
"epoch": 0.52,
"learning_rate": 0.0003870208873949453,
"loss": 0.4764,
"step": 268
},
{
"epoch": 0.52,
"learning_rate": 0.00038686763282290556,
"loss": 0.4311,
"step": 269
},
{
"epoch": 0.52,
"learning_rate": 0.0003867135094613774,
"loss": 0.5462,
"step": 270
},
{
"epoch": 0.52,
"learning_rate": 0.0003865585180269148,
"loss": 0.5006,
"step": 271
},
{
"epoch": 0.52,
"learning_rate": 0.0003864026592401076,
"loss": 0.5347,
"step": 272
},
{
"epoch": 0.53,
"learning_rate": 0.00038624593382557835,
"loss": 0.5242,
"step": 273
},
{
"epoch": 0.53,
"learning_rate": 0.00038608834251197856,
"loss": 0.5005,
"step": 274
},
{
"epoch": 0.53,
"learning_rate": 0.00038592988603198554,
"loss": 0.5436,
"step": 275
},
{
"epoch": 0.53,
"learning_rate": 0.000385770565122299,
"loss": 0.4658,
"step": 276
},
{
"epoch": 0.53,
"learning_rate": 0.0003856103805236375,
"loss": 0.5273,
"step": 277
},
{
"epoch": 0.54,
"learning_rate": 0.00038544933298073516,
"loss": 0.436,
"step": 278
},
{
"epoch": 0.54,
"learning_rate": 0.00038528742324233804,
"loss": 0.4785,
"step": 279
},
{
"epoch": 0.54,
"learning_rate": 0.00038512465206120086,
"loss": 0.5366,
"step": 280
},
{
"epoch": 0.54,
"learning_rate": 0.00038496102019408324,
"loss": 0.4448,
"step": 281
},
{
"epoch": 0.54,
"learning_rate": 0.00038479652840174637,
"loss": 0.5132,
"step": 282
},
{
"epoch": 0.54,
"learning_rate": 0.00038463117744894955,
"loss": 0.7918,
"step": 283
},
{
"epoch": 0.55,
"learning_rate": 0.00038446496810444627,
"loss": 0.5309,
"step": 284
},
{
"epoch": 0.55,
"learning_rate": 0.00038429790114098114,
"loss": 0.5316,
"step": 285
},
{
"epoch": 0.55,
"learning_rate": 0.00038412997733528576,
"loss": 0.4611,
"step": 286
},
{
"epoch": 0.55,
"learning_rate": 0.00038396119746807563,
"loss": 0.4609,
"step": 287
},
{
"epoch": 0.55,
"learning_rate": 0.00038379156232404613,
"loss": 0.5821,
"step": 288
},
{
"epoch": 0.56,
"learning_rate": 0.0003836210726918691,
"loss": 0.5883,
"step": 289
},
{
"epoch": 0.56,
"learning_rate": 0.0003834497293641889,
"loss": 0.5012,
"step": 290
},
{
"epoch": 0.56,
"learning_rate": 0.00038327753313761913,
"loss": 0.4457,
"step": 291
},
{
"epoch": 0.56,
"learning_rate": 0.00038310448481273867,
"loss": 0.4851,
"step": 292
},
{
"epoch": 0.56,
"learning_rate": 0.00038293058519408787,
"loss": 0.5622,
"step": 293
},
{
"epoch": 0.57,
"learning_rate": 0.00038275583509016507,
"loss": 0.5703,
"step": 294
},
{
"epoch": 0.57,
"learning_rate": 0.00038258023531342265,
"loss": 0.5718,
"step": 295
},
{
"epoch": 0.57,
"learning_rate": 0.0003824037866802632,
"loss": 0.5183,
"step": 296
},
{
"epoch": 0.57,
"learning_rate": 0.00038222649001103614,
"loss": 0.5085,
"step": 297
},
{
"epoch": 0.57,
"learning_rate": 0.00038204834613003323,
"loss": 0.5388,
"step": 298
},
{
"epoch": 0.58,
"learning_rate": 0.00038186935586548537,
"loss": 0.5425,
"step": 299
},
{
"epoch": 0.58,
"learning_rate": 0.0003816895200495584,
"loss": 0.447,
"step": 300
},
{
"epoch": 0.58,
"learning_rate": 0.0003815088395183493,
"loss": 0.5541,
"step": 301
},
{
"epoch": 0.58,
"learning_rate": 0.00038132731511188227,
"loss": 0.5518,
"step": 302
},
{
"epoch": 0.58,
"learning_rate": 0.000381144947674105,
"loss": 0.5074,
"step": 303
},
{
"epoch": 0.59,
"learning_rate": 0.0003809617380528847,
"loss": 0.5134,
"step": 304
},
{
"epoch": 0.59,
"learning_rate": 0.0003807776871000037,
"loss": 0.4599,
"step": 305
},
{
"epoch": 0.59,
"learning_rate": 0.0003805927956711562,
"loss": 0.5838,
"step": 306
},
{
"epoch": 0.59,
"learning_rate": 0.00038040706462594395,
"loss": 0.5216,
"step": 307
},
{
"epoch": 0.59,
"learning_rate": 0.00038022049482787216,
"loss": 0.5323,
"step": 308
},
{
"epoch": 0.6,
"learning_rate": 0.0003800330871443456,
"loss": 0.5681,
"step": 309
},
{
"epoch": 0.6,
"learning_rate": 0.00037984484244666446,
"loss": 0.4172,
"step": 310
},
{
"epoch": 0.6,
"learning_rate": 0.0003796557616100207,
"loss": 0.4958,
"step": 311
},
{
"epoch": 0.6,
"learning_rate": 0.0003794658455134934,
"loss": 0.662,
"step": 312
},
{
"epoch": 0.6,
"learning_rate": 0.0003792750950400451,
"loss": 0.5832,
"step": 313
},
{
"epoch": 0.6,
"learning_rate": 0.0003790835110765174,
"loss": 0.4271,
"step": 314
},
{
"epoch": 0.61,
"learning_rate": 0.0003788910945136271,
"loss": 0.4842,
"step": 315
},
{
"epoch": 0.61,
"learning_rate": 0.00037869784624596186,
"loss": 0.4656,
"step": 316
},
{
"epoch": 0.61,
"learning_rate": 0.00037850376717197626,
"loss": 0.4981,
"step": 317
},
{
"epoch": 0.61,
"learning_rate": 0.00037830885819398733,
"loss": 0.5162,
"step": 318
},
{
"epoch": 0.61,
"learning_rate": 0.00037811312021817067,
"loss": 0.652,
"step": 319
},
{
"epoch": 0.62,
"learning_rate": 0.0003779165541545558,
"loss": 0.5104,
"step": 320
},
{
"epoch": 0.62,
"learning_rate": 0.0003777191609170225,
"loss": 0.4971,
"step": 321
},
{
"epoch": 0.62,
"learning_rate": 0.0003775209414232962,
"loss": 0.4871,
"step": 322
},
{
"epoch": 0.62,
"learning_rate": 0.0003773218965949436,
"loss": 0.5226,
"step": 323
},
{
"epoch": 0.62,
"learning_rate": 0.00037712202735736884,
"loss": 0.4823,
"step": 324
},
{
"epoch": 0.63,
"learning_rate": 0.0003769213346398087,
"loss": 0.497,
"step": 325
},
{
"epoch": 0.63,
"learning_rate": 0.0003767198193753286,
"loss": 0.5976,
"step": 326
},
{
"epoch": 0.63,
"learning_rate": 0.0003765174825008181,
"loss": 0.4532,
"step": 327
},
{
"epoch": 0.63,
"learning_rate": 0.0003763143249569868,
"loss": 0.5236,
"step": 328
},
{
"epoch": 0.63,
"learning_rate": 0.00037611034768835947,
"loss": 0.6513,
"step": 329
},
{
"epoch": 0.64,
"learning_rate": 0.00037590555164327224,
"loss": 0.5686,
"step": 330
},
{
"epoch": 0.64,
"learning_rate": 0.00037569993777386774,
"loss": 0.456,
"step": 331
},
{
"epoch": 0.64,
"learning_rate": 0.0003754935070360909,
"loss": 0.5181,
"step": 332
},
{
"epoch": 0.64,
"learning_rate": 0.0003752862603896846,
"loss": 0.4765,
"step": 333
},
{
"epoch": 0.64,
"learning_rate": 0.00037507819879818477,
"loss": 0.5363,
"step": 334
},
{
"epoch": 0.65,
"learning_rate": 0.00037486932322891646,
"loss": 0.4584,
"step": 335
},
{
"epoch": 0.65,
"learning_rate": 0.00037465963465298886,
"loss": 0.5428,
"step": 336
},
{
"epoch": 0.65,
"learning_rate": 0.0003744491340452913,
"loss": 0.3927,
"step": 337
},
{
"epoch": 0.65,
"learning_rate": 0.0003742378223844882,
"loss": 0.5478,
"step": 338
},
{
"epoch": 0.65,
"learning_rate": 0.0003740257006530147,
"loss": 0.469,
"step": 339
},
{
"epoch": 0.65,
"learning_rate": 0.00037381276983707246,
"loss": 0.5169,
"step": 340
},
{
"epoch": 0.66,
"learning_rate": 0.00037359903092662434,
"loss": 0.4797,
"step": 341
},
{
"epoch": 0.66,
"learning_rate": 0.00037338448491539054,
"loss": 0.5315,
"step": 342
},
{
"epoch": 0.66,
"learning_rate": 0.00037316913280084353,
"loss": 0.4422,
"step": 343
},
{
"epoch": 0.66,
"learning_rate": 0.0003729529755842035,
"loss": 0.4426,
"step": 344
},
{
"epoch": 0.66,
"learning_rate": 0.0003727360142704337,
"loss": 0.4718,
"step": 345
},
{
"epoch": 0.67,
"learning_rate": 0.0003725182498682361,
"loss": 0.5585,
"step": 346
},
{
"epoch": 0.67,
"learning_rate": 0.0003722996833900459,
"loss": 0.4775,
"step": 347
},
{
"epoch": 0.67,
"learning_rate": 0.0003720803158520279,
"loss": 0.6014,
"step": 348
},
{
"epoch": 0.67,
"learning_rate": 0.00037186014827407076,
"loss": 0.5117,
"step": 349
},
{
"epoch": 0.67,
"learning_rate": 0.0003716391816797829,
"loss": 0.5404,
"step": 350
},
{
"epoch": 0.68,
"learning_rate": 0.0003714174170964876,
"loss": 0.527,
"step": 351
},
{
"epoch": 0.68,
"learning_rate": 0.00037119485555521796,
"loss": 0.4555,
"step": 352
},
{
"epoch": 0.68,
"learning_rate": 0.00037097149809071255,
"loss": 0.5372,
"step": 353
},
{
"epoch": 0.68,
"learning_rate": 0.00037074734574141016,
"loss": 0.5377,
"step": 354
},
{
"epoch": 0.68,
"learning_rate": 0.0003705223995494454,
"loss": 0.4925,
"step": 355
},
{
"epoch": 0.69,
"learning_rate": 0.00037029666056064345,
"loss": 0.482,
"step": 356
},
{
"epoch": 0.69,
"learning_rate": 0.00037007012982451546,
"loss": 0.5235,
"step": 357
},
{
"epoch": 0.69,
"learning_rate": 0.00036984280839425356,
"loss": 0.4957,
"step": 358
},
{
"epoch": 0.69,
"learning_rate": 0.000369614697326726,
"loss": 0.5379,
"step": 359
},
{
"epoch": 0.69,
"learning_rate": 0.0003693857976824721,
"loss": 0.4653,
"step": 360
},
{
"epoch": 0.7,
"learning_rate": 0.00036915611052569785,
"loss": 0.469,
"step": 361
},
{
"epoch": 0.7,
"learning_rate": 0.0003689256369242702,
"loss": 0.5618,
"step": 362
},
{
"epoch": 0.7,
"learning_rate": 0.0003686943779497124,
"loss": 0.4459,
"step": 363
},
{
"epoch": 0.7,
"learning_rate": 0.0003684623346771995,
"loss": 0.5606,
"step": 364
},
{
"epoch": 0.7,
"learning_rate": 0.0003682295081855524,
"loss": 0.4368,
"step": 365
},
{
"epoch": 0.7,
"learning_rate": 0.00036799589955723375,
"loss": 0.4168,
"step": 366
},
{
"epoch": 0.71,
"learning_rate": 0.00036776150987834243,
"loss": 0.4664,
"step": 367
},
{
"epoch": 0.71,
"learning_rate": 0.00036752634023860846,
"loss": 0.4737,
"step": 368
},
{
"epoch": 0.71,
"learning_rate": 0.0003672903917313883,
"loss": 0.4247,
"step": 369
},
{
"epoch": 0.71,
"learning_rate": 0.00036705366545365935,
"loss": 0.5677,
"step": 370
},
{
"epoch": 0.71,
"learning_rate": 0.00036681616250601505,
"loss": 0.5441,
"step": 371
},
{
"epoch": 0.72,
"learning_rate": 0.0003665778839926599,
"loss": 0.6247,
"step": 372
},
{
"epoch": 0.72,
"learning_rate": 0.00036633883102140405,
"loss": 0.5217,
"step": 373
},
{
"epoch": 0.72,
"learning_rate": 0.0003660990047036584,
"loss": 0.4651,
"step": 374
},
{
"epoch": 0.72,
"learning_rate": 0.0003658584061544291,
"loss": 0.4648,
"step": 375
},
{
"epoch": 0.72,
"learning_rate": 0.0003656170364923128,
"loss": 0.6048,
"step": 376
},
{
"epoch": 0.73,
"learning_rate": 0.00036537489683949114,
"loss": 0.4515,
"step": 377
},
{
"epoch": 0.73,
"learning_rate": 0.0003651319883217255,
"loss": 0.5096,
"step": 378
},
{
"epoch": 0.73,
"learning_rate": 0.00036488831206835207,
"loss": 0.4231,
"step": 379
},
{
"epoch": 0.73,
"learning_rate": 0.00036464386921227637,
"loss": 0.4903,
"step": 380
},
{
"epoch": 0.73,
"learning_rate": 0.00036439866088996796,
"loss": 0.5131,
"step": 381
},
{
"epoch": 0.74,
"learning_rate": 0.0003641526882414553,
"loss": 0.5986,
"step": 382
},
{
"epoch": 0.74,
"learning_rate": 0.0003639059524103203,
"loss": 0.6,
"step": 383
},
{
"epoch": 0.74,
"learning_rate": 0.0003636584545436931,
"loss": 0.5216,
"step": 384
},
{
"epoch": 0.74,
"learning_rate": 0.0003634101957922468,
"loss": 0.5144,
"step": 385
},
{
"epoch": 0.74,
"learning_rate": 0.00036316117731019184,
"loss": 0.4963,
"step": 386
},
{
"epoch": 0.75,
"learning_rate": 0.0003629114002552711,
"loss": 0.5657,
"step": 387
},
{
"epoch": 0.75,
"learning_rate": 0.00036266086578875384,
"loss": 0.5028,
"step": 388
},
{
"epoch": 0.75,
"learning_rate": 0.0003624095750754311,
"loss": 0.573,
"step": 389
},
{
"epoch": 0.75,
"learning_rate": 0.00036215752928360967,
"loss": 0.5199,
"step": 390
},
{
"epoch": 0.75,
"learning_rate": 0.0003619047295851068,
"loss": 0.656,
"step": 391
},
{
"epoch": 0.75,
"learning_rate": 0.00036165117715524506,
"loss": 0.5129,
"step": 392
},
{
"epoch": 0.76,
"learning_rate": 0.00036139687317284647,
"loss": 0.3945,
"step": 393
},
{
"epoch": 0.76,
"learning_rate": 0.0003611418188202271,
"loss": 0.5318,
"step": 394
},
{
"epoch": 0.76,
"learning_rate": 0.00036088601528319196,
"loss": 0.5344,
"step": 395
},
{
"epoch": 0.76,
"learning_rate": 0.00036062946375102885,
"loss": 0.5407,
"step": 396
},
{
"epoch": 0.76,
"learning_rate": 0.0003603721654165034,
"loss": 0.5364,
"step": 397
},
{
"epoch": 0.77,
"learning_rate": 0.00036011412147585306,
"loss": 0.5407,
"step": 398
},
{
"epoch": 0.77,
"learning_rate": 0.0003598553331287821,
"loss": 0.5999,
"step": 399
},
{
"epoch": 0.77,
"learning_rate": 0.0003595958015784555,
"loss": 0.624,
"step": 400
},
{
"epoch": 0.77,
"learning_rate": 0.00035933552803149354,
"loss": 0.5351,
"step": 401
},
{
"epoch": 0.77,
"learning_rate": 0.0003590745136979662,
"loss": 0.5196,
"step": 402
},
{
"epoch": 0.78,
"learning_rate": 0.00035881275979138765,
"loss": 0.5447,
"step": 403
},
{
"epoch": 0.78,
"learning_rate": 0.0003585502675287104,
"loss": 0.4908,
"step": 404
},
{
"epoch": 0.78,
"learning_rate": 0.00035828703813031986,
"loss": 0.5172,
"step": 405
},
{
"epoch": 0.78,
"learning_rate": 0.00035802307282002834,
"loss": 0.5923,
"step": 406
},
{
"epoch": 0.78,
"learning_rate": 0.0003577583728250699,
"loss": 0.568,
"step": 407
},
{
"epoch": 0.79,
"learning_rate": 0.00035749293937609395,
"loss": 0.4618,
"step": 408
},
{
"epoch": 0.79,
"learning_rate": 0.0003572267737071601,
"loss": 0.5351,
"step": 409
},
{
"epoch": 0.79,
"learning_rate": 0.0003569598770557322,
"loss": 0.5285,
"step": 410
},
{
"epoch": 0.79,
"learning_rate": 0.00035669225066267256,
"loss": 0.4571,
"step": 411
},
{
"epoch": 0.79,
"learning_rate": 0.00035642389577223625,
"loss": 0.4214,
"step": 412
},
{
"epoch": 0.8,
"learning_rate": 0.0003561548136320653,
"loss": 0.5393,
"step": 413
},
{
"epoch": 0.8,
"learning_rate": 0.0003558850054931828,
"loss": 0.549,
"step": 414
},
{
"epoch": 0.8,
"learning_rate": 0.00035561447260998714,
"loss": 0.4824,
"step": 415
},
{
"epoch": 0.8,
"learning_rate": 0.00035534321624024656,
"loss": 0.6244,
"step": 416
},
{
"epoch": 0.8,
"learning_rate": 0.00035507123764509245,
"loss": 0.5436,
"step": 417
},
{
"epoch": 0.8,
"learning_rate": 0.0003547985380890144,
"loss": 0.5198,
"step": 418
},
{
"epoch": 0.81,
"learning_rate": 0.00035452511883985366,
"loss": 0.5979,
"step": 419
},
{
"epoch": 0.81,
"learning_rate": 0.00035425098116879754,
"loss": 0.4158,
"step": 420
},
{
"epoch": 0.81,
"learning_rate": 0.00035397612635037356,
"loss": 0.5125,
"step": 421
},
{
"epoch": 0.81,
"learning_rate": 0.00035370055566244334,
"loss": 0.4699,
"step": 422
},
{
"epoch": 0.81,
"learning_rate": 0.0003534242703861966,
"loss": 0.5553,
"step": 423
},
{
"epoch": 0.82,
"learning_rate": 0.00035314727180614573,
"loss": 0.5969,
"step": 424
},
{
"epoch": 0.82,
"learning_rate": 0.00035286956121011897,
"loss": 0.456,
"step": 425
},
{
"epoch": 0.82,
"learning_rate": 0.0003525911398892552,
"loss": 0.5195,
"step": 426
},
{
"epoch": 0.82,
"learning_rate": 0.0003523120091379975,
"loss": 0.5187,
"step": 427
},
{
"epoch": 0.82,
"learning_rate": 0.00035203217025408726,
"loss": 0.5443,
"step": 428
},
{
"epoch": 0.83,
"learning_rate": 0.0003517516245385582,
"loss": 0.4476,
"step": 429
},
{
"epoch": 0.83,
"learning_rate": 0.0003514703732957301,
"loss": 0.5757,
"step": 430
},
{
"epoch": 0.83,
"learning_rate": 0.00035118841783320304,
"loss": 0.5129,
"step": 431
},
{
"epoch": 0.83,
"learning_rate": 0.00035090575946185114,
"loss": 0.6354,
"step": 432
},
{
"epoch": 0.83,
"learning_rate": 0.00035062239949581645,
"loss": 0.4065,
"step": 433
},
{
"epoch": 0.84,
"learning_rate": 0.000350338339252503,
"loss": 0.5472,
"step": 434
},
{
"epoch": 0.84,
"learning_rate": 0.00035005358005257045,
"loss": 0.5424,
"step": 435
},
{
"epoch": 0.84,
"learning_rate": 0.00034976812321992816,
"loss": 0.6127,
"step": 436
},
{
"epoch": 0.84,
"learning_rate": 0.00034948197008172877,
"loss": 0.63,
"step": 437
},
{
"epoch": 0.84,
"learning_rate": 0.0003491951219683625,
"loss": 0.413,
"step": 438
},
{
"epoch": 0.85,
"learning_rate": 0.00034890758021345034,
"loss": 0.5435,
"step": 439
},
{
"epoch": 0.85,
"learning_rate": 0.00034861934615383844,
"loss": 0.5433,
"step": 440
},
{
"epoch": 0.85,
"learning_rate": 0.00034833042112959153,
"loss": 0.4763,
"step": 441
},
{
"epoch": 0.85,
"learning_rate": 0.00034804080648398667,
"loss": 0.5727,
"step": 442
},
{
"epoch": 0.85,
"learning_rate": 0.00034775050356350727,
"loss": 0.5392,
"step": 443
},
{
"epoch": 0.85,
"learning_rate": 0.00034745951371783666,
"loss": 0.4981,
"step": 444
},
{
"epoch": 0.86,
"learning_rate": 0.0003471678382998518,
"loss": 0.5516,
"step": 445
},
{
"epoch": 0.86,
"learning_rate": 0.00034687547866561703,
"loss": 0.4965,
"step": 446
},
{
"epoch": 0.86,
"learning_rate": 0.0003465824361743779,
"loss": 0.4982,
"step": 447
},
{
"epoch": 0.86,
"learning_rate": 0.0003462887121885544,
"loss": 0.5619,
"step": 448
},
{
"epoch": 0.86,
"learning_rate": 0.0003459943080737353,
"loss": 0.5273,
"step": 449
},
{
"epoch": 0.87,
"learning_rate": 0.00034569922519867133,
"loss": 0.517,
"step": 450
}
],
"logging_steps": 1,
"max_steps": 1557,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 6.020058348017418e+17,
"trial_name": null,
"trial_params": null
}