{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1173184357541899,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.00019972067039106145,
"loss": 2.6443,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 0.00019944134078212292,
"loss": 2.4104,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 0.00019916201117318435,
"loss": 2.4975,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 0.00019888268156424582,
"loss": 2.3513,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 0.0001986033519553073,
"loss": 2.4274,
"step": 5
},
{
"epoch": 0.03,
"learning_rate": 0.00019832402234636873,
"loss": 2.3628,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 0.0001980446927374302,
"loss": 2.3567,
"step": 7
},
{
"epoch": 0.04,
"learning_rate": 0.00019776536312849163,
"loss": 2.4121,
"step": 8
},
{
"epoch": 0.05,
"learning_rate": 0.00019748603351955307,
"loss": 2.4033,
"step": 9
},
{
"epoch": 0.06,
"learning_rate": 0.00019720670391061454,
"loss": 2.2805,
"step": 10
},
{
"epoch": 0.06,
"learning_rate": 0.00019692737430167598,
"loss": 2.2639,
"step": 11
},
{
"epoch": 0.07,
"learning_rate": 0.00019664804469273744,
"loss": 2.2724,
"step": 12
},
{
"epoch": 0.07,
"learning_rate": 0.00019636871508379888,
"loss": 2.332,
"step": 13
},
{
"epoch": 0.08,
"learning_rate": 0.00019608938547486035,
"loss": 2.2261,
"step": 14
},
{
"epoch": 0.08,
"learning_rate": 0.00019581005586592182,
"loss": 2.2208,
"step": 15
},
{
"epoch": 0.09,
"learning_rate": 0.00019553072625698326,
"loss": 2.3351,
"step": 16
},
{
"epoch": 0.09,
"learning_rate": 0.0001952513966480447,
"loss": 2.2475,
"step": 17
},
{
"epoch": 0.1,
"learning_rate": 0.00019497206703910616,
"loss": 2.3283,
"step": 18
},
{
"epoch": 0.11,
"learning_rate": 0.0001946927374301676,
"loss": 2.1346,
"step": 19
},
{
"epoch": 0.11,
"learning_rate": 0.00019441340782122907,
"loss": 2.131,
"step": 20
},
{
"epoch": 0.12,
"learning_rate": 0.0001941340782122905,
"loss": 2.1718,
"step": 21
},
{
"epoch": 0.12,
"learning_rate": 0.00019385474860335195,
"loss": 2.2446,
"step": 22
},
{
"epoch": 0.13,
"learning_rate": 0.0001935754189944134,
"loss": 2.306,
"step": 23
},
{
"epoch": 0.13,
"learning_rate": 0.00019329608938547488,
"loss": 2.1908,
"step": 24
},
{
"epoch": 0.14,
"learning_rate": 0.00019301675977653632,
"loss": 2.2844,
"step": 25
},
{
"epoch": 0.15,
"learning_rate": 0.00019273743016759779,
"loss": 2.2235,
"step": 26
},
{
"epoch": 0.15,
"learning_rate": 0.00019245810055865922,
"loss": 2.1842,
"step": 27
},
{
"epoch": 0.16,
"learning_rate": 0.00019217877094972066,
"loss": 2.2675,
"step": 28
},
{
"epoch": 0.16,
"learning_rate": 0.00019189944134078213,
"loss": 2.2532,
"step": 29
},
{
"epoch": 0.17,
"learning_rate": 0.00019162011173184357,
"loss": 2.1788,
"step": 30
},
{
"epoch": 0.17,
"learning_rate": 0.00019134078212290504,
"loss": 2.2494,
"step": 31
},
{
"epoch": 0.18,
"learning_rate": 0.0001910614525139665,
"loss": 2.1995,
"step": 32
},
{
"epoch": 0.18,
"learning_rate": 0.00019078212290502794,
"loss": 2.1451,
"step": 33
},
{
"epoch": 0.19,
"learning_rate": 0.0001905027932960894,
"loss": 2.223,
"step": 34
},
{
"epoch": 0.2,
"learning_rate": 0.00019022346368715085,
"loss": 2.2854,
"step": 35
},
{
"epoch": 0.2,
"learning_rate": 0.0001899441340782123,
"loss": 2.2265,
"step": 36
},
{
"epoch": 0.21,
"learning_rate": 0.00018966480446927375,
"loss": 2.1214,
"step": 37
},
{
"epoch": 0.21,
"learning_rate": 0.0001893854748603352,
"loss": 2.1898,
"step": 38
},
{
"epoch": 0.22,
"learning_rate": 0.00018910614525139666,
"loss": 2.1974,
"step": 39
},
{
"epoch": 0.22,
"learning_rate": 0.0001888268156424581,
"loss": 2.2259,
"step": 40
},
{
"epoch": 0.23,
"learning_rate": 0.00018854748603351957,
"loss": 2.2094,
"step": 41
},
{
"epoch": 0.23,
"learning_rate": 0.00018826815642458103,
"loss": 2.1731,
"step": 42
},
{
"epoch": 0.24,
"learning_rate": 0.00018798882681564247,
"loss": 2.2373,
"step": 43
},
{
"epoch": 0.25,
"learning_rate": 0.0001877094972067039,
"loss": 2.2295,
"step": 44
},
{
"epoch": 0.25,
"learning_rate": 0.00018743016759776538,
"loss": 2.1947,
"step": 45
},
{
"epoch": 0.26,
"learning_rate": 0.00018715083798882682,
"loss": 2.2115,
"step": 46
},
{
"epoch": 0.26,
"learning_rate": 0.00018687150837988828,
"loss": 2.1224,
"step": 47
},
{
"epoch": 0.27,
"learning_rate": 0.00018659217877094972,
"loss": 2.2137,
"step": 48
},
{
"epoch": 0.27,
"learning_rate": 0.00018631284916201116,
"loss": 2.2338,
"step": 49
},
{
"epoch": 0.28,
"learning_rate": 0.00018603351955307266,
"loss": 2.1298,
"step": 50
},
{
"epoch": 0.28,
"learning_rate": 0.0001857541899441341,
"loss": 2.0883,
"step": 51
},
{
"epoch": 0.29,
"learning_rate": 0.00018547486033519553,
"loss": 2.1216,
"step": 52
},
{
"epoch": 0.3,
"learning_rate": 0.000185195530726257,
"loss": 2.2112,
"step": 53
},
{
"epoch": 0.3,
"learning_rate": 0.00018491620111731844,
"loss": 2.1224,
"step": 54
},
{
"epoch": 0.31,
"learning_rate": 0.0001846368715083799,
"loss": 2.2375,
"step": 55
},
{
"epoch": 0.31,
"learning_rate": 0.00018435754189944135,
"loss": 2.2235,
"step": 56
},
{
"epoch": 0.32,
"learning_rate": 0.00018407821229050279,
"loss": 2.1682,
"step": 57
},
{
"epoch": 0.32,
"learning_rate": 0.00018379888268156425,
"loss": 2.2077,
"step": 58
},
{
"epoch": 0.33,
"learning_rate": 0.00018351955307262572,
"loss": 2.1596,
"step": 59
},
{
"epoch": 0.34,
"learning_rate": 0.00018324022346368716,
"loss": 2.1311,
"step": 60
},
{
"epoch": 0.34,
"learning_rate": 0.00018296089385474862,
"loss": 2.1333,
"step": 61
},
{
"epoch": 0.35,
"learning_rate": 0.00018268156424581006,
"loss": 2.0901,
"step": 62
},
{
"epoch": 0.35,
"learning_rate": 0.00018240223463687153,
"loss": 2.1971,
"step": 63
},
{
"epoch": 0.36,
"learning_rate": 0.00018212290502793297,
"loss": 2.2602,
"step": 64
},
{
"epoch": 0.36,
"learning_rate": 0.0001818435754189944,
"loss": 2.2194,
"step": 65
},
{
"epoch": 0.37,
"learning_rate": 0.00018156424581005588,
"loss": 2.1218,
"step": 66
},
{
"epoch": 0.37,
"learning_rate": 0.00018128491620111731,
"loss": 2.2049,
"step": 67
},
{
"epoch": 0.38,
"learning_rate": 0.00018100558659217878,
"loss": 2.1521,
"step": 68
},
{
"epoch": 0.39,
"learning_rate": 0.00018072625698324025,
"loss": 2.112,
"step": 69
},
{
"epoch": 0.39,
"learning_rate": 0.0001804469273743017,
"loss": 2.1906,
"step": 70
},
{
"epoch": 0.4,
"learning_rate": 0.00018016759776536313,
"loss": 2.1717,
"step": 71
},
{
"epoch": 0.4,
"learning_rate": 0.0001798882681564246,
"loss": 2.0712,
"step": 72
},
{
"epoch": 0.41,
"learning_rate": 0.00017960893854748603,
"loss": 2.141,
"step": 73
},
{
"epoch": 0.41,
"learning_rate": 0.0001793296089385475,
"loss": 2.0656,
"step": 74
},
{
"epoch": 0.42,
"learning_rate": 0.00017905027932960894,
"loss": 2.1125,
"step": 75
},
{
"epoch": 0.42,
"learning_rate": 0.00017877094972067038,
"loss": 2.0869,
"step": 76
},
{
"epoch": 0.43,
"learning_rate": 0.00017849162011173187,
"loss": 2.2478,
"step": 77
},
{
"epoch": 0.44,
"learning_rate": 0.0001782122905027933,
"loss": 2.1535,
"step": 78
},
{
"epoch": 0.44,
"learning_rate": 0.00017793296089385475,
"loss": 2.1927,
"step": 79
},
{
"epoch": 0.45,
"learning_rate": 0.00017765363128491622,
"loss": 2.1213,
"step": 80
},
{
"epoch": 0.45,
"learning_rate": 0.00017737430167597766,
"loss": 2.0981,
"step": 81
},
{
"epoch": 0.46,
"learning_rate": 0.00017709497206703912,
"loss": 2.1828,
"step": 82
},
{
"epoch": 0.46,
"learning_rate": 0.00017681564245810056,
"loss": 2.0562,
"step": 83
},
{
"epoch": 0.47,
"learning_rate": 0.000176536312849162,
"loss": 2.1334,
"step": 84
},
{
"epoch": 0.47,
"learning_rate": 0.00017625698324022347,
"loss": 2.1225,
"step": 85
},
{
"epoch": 0.48,
"learning_rate": 0.00017597765363128493,
"loss": 2.2098,
"step": 86
},
{
"epoch": 0.49,
"learning_rate": 0.00017569832402234637,
"loss": 2.1519,
"step": 87
},
{
"epoch": 0.49,
"learning_rate": 0.00017541899441340784,
"loss": 2.1132,
"step": 88
},
{
"epoch": 0.5,
"learning_rate": 0.00017513966480446928,
"loss": 2.0333,
"step": 89
},
{
"epoch": 0.5,
"learning_rate": 0.00017486033519553075,
"loss": 2.2764,
"step": 90
},
{
"epoch": 0.51,
"learning_rate": 0.00017458100558659218,
"loss": 2.1838,
"step": 91
},
{
"epoch": 0.51,
"learning_rate": 0.00017430167597765362,
"loss": 2.1386,
"step": 92
},
{
"epoch": 0.52,
"learning_rate": 0.0001740223463687151,
"loss": 2.1034,
"step": 93
},
{
"epoch": 0.53,
"learning_rate": 0.00017374301675977656,
"loss": 2.0346,
"step": 94
},
{
"epoch": 0.53,
"learning_rate": 0.000173463687150838,
"loss": 2.0274,
"step": 95
},
{
"epoch": 0.54,
"learning_rate": 0.00017318435754189946,
"loss": 2.1036,
"step": 96
},
{
"epoch": 0.54,
"learning_rate": 0.0001729050279329609,
"loss": 2.1208,
"step": 97
},
{
"epoch": 0.55,
"learning_rate": 0.00017262569832402237,
"loss": 2.0572,
"step": 98
},
{
"epoch": 0.55,
"learning_rate": 0.0001723463687150838,
"loss": 2.1702,
"step": 99
},
{
"epoch": 0.56,
"learning_rate": 0.00017206703910614525,
"loss": 2.1302,
"step": 100
},
{
"epoch": 0.56,
"learning_rate": 0.0001717877094972067,
"loss": 2.0175,
"step": 101
},
{
"epoch": 0.57,
"learning_rate": 0.00017150837988826815,
"loss": 2.1006,
"step": 102
},
{
"epoch": 0.58,
"learning_rate": 0.00017122905027932962,
"loss": 2.0662,
"step": 103
},
{
"epoch": 0.58,
"learning_rate": 0.00017094972067039109,
"loss": 1.988,
"step": 104
},
{
"epoch": 0.59,
"learning_rate": 0.00017067039106145253,
"loss": 2.1008,
"step": 105
},
{
"epoch": 0.59,
"learning_rate": 0.00017039106145251396,
"loss": 2.1482,
"step": 106
},
{
"epoch": 0.6,
"learning_rate": 0.00017011173184357543,
"loss": 2.1052,
"step": 107
},
{
"epoch": 0.6,
"learning_rate": 0.00016983240223463687,
"loss": 2.0978,
"step": 108
},
{
"epoch": 0.61,
"learning_rate": 0.00016955307262569834,
"loss": 2.1303,
"step": 109
},
{
"epoch": 0.61,
"learning_rate": 0.00016927374301675978,
"loss": 2.0794,
"step": 110
},
{
"epoch": 0.62,
"learning_rate": 0.00016899441340782122,
"loss": 2.1059,
"step": 111
},
{
"epoch": 0.63,
"learning_rate": 0.0001687150837988827,
"loss": 1.9642,
"step": 112
},
{
"epoch": 0.63,
"learning_rate": 0.00016843575418994415,
"loss": 2.0415,
"step": 113
},
{
"epoch": 0.64,
"learning_rate": 0.0001681564245810056,
"loss": 2.0795,
"step": 114
},
{
"epoch": 0.64,
"learning_rate": 0.00016787709497206705,
"loss": 2.0238,
"step": 115
},
{
"epoch": 0.65,
"learning_rate": 0.0001675977653631285,
"loss": 2.078,
"step": 116
},
{
"epoch": 0.65,
"learning_rate": 0.00016731843575418996,
"loss": 2.1362,
"step": 117
},
{
"epoch": 0.66,
"learning_rate": 0.0001670391061452514,
"loss": 2.0552,
"step": 118
},
{
"epoch": 0.66,
"learning_rate": 0.00016675977653631284,
"loss": 2.213,
"step": 119
},
{
"epoch": 0.67,
"learning_rate": 0.0001664804469273743,
"loss": 1.9999,
"step": 120
},
{
"epoch": 0.68,
"learning_rate": 0.00016620111731843577,
"loss": 2.06,
"step": 121
},
{
"epoch": 0.68,
"learning_rate": 0.0001659217877094972,
"loss": 2.0177,
"step": 122
},
{
"epoch": 0.69,
"learning_rate": 0.00016564245810055868,
"loss": 2.0504,
"step": 123
},
{
"epoch": 0.69,
"learning_rate": 0.00016536312849162012,
"loss": 2.0585,
"step": 124
},
{
"epoch": 0.7,
"learning_rate": 0.00016508379888268158,
"loss": 2.0273,
"step": 125
},
{
"epoch": 0.7,
"learning_rate": 0.00016480446927374302,
"loss": 2.0549,
"step": 126
},
{
"epoch": 0.71,
"learning_rate": 0.00016452513966480446,
"loss": 2.044,
"step": 127
},
{
"epoch": 0.72,
"learning_rate": 0.00016424581005586593,
"loss": 2.0731,
"step": 128
},
{
"epoch": 0.72,
"learning_rate": 0.00016396648044692737,
"loss": 2.0568,
"step": 129
},
{
"epoch": 0.73,
"learning_rate": 0.00016368715083798883,
"loss": 2.007,
"step": 130
},
{
"epoch": 0.73,
"learning_rate": 0.0001634078212290503,
"loss": 2.0707,
"step": 131
},
{
"epoch": 0.74,
"learning_rate": 0.00016312849162011174,
"loss": 1.9793,
"step": 132
},
{
"epoch": 0.74,
"learning_rate": 0.0001628491620111732,
"loss": 2.1311,
"step": 133
},
{
"epoch": 0.75,
"learning_rate": 0.00016256983240223465,
"loss": 2.0016,
"step": 134
},
{
"epoch": 0.75,
"learning_rate": 0.00016229050279329609,
"loss": 1.9945,
"step": 135
},
{
"epoch": 0.76,
"learning_rate": 0.00016201117318435755,
"loss": 2.0186,
"step": 136
},
{
"epoch": 0.77,
"learning_rate": 0.000161731843575419,
"loss": 2.0971,
"step": 137
},
{
"epoch": 0.77,
"learning_rate": 0.00016145251396648046,
"loss": 2.0883,
"step": 138
},
{
"epoch": 0.78,
"learning_rate": 0.00016117318435754192,
"loss": 2.0803,
"step": 139
},
{
"epoch": 0.78,
"learning_rate": 0.00016089385474860336,
"loss": 2.0617,
"step": 140
},
{
"epoch": 0.79,
"learning_rate": 0.00016061452513966483,
"loss": 2.1265,
"step": 141
},
{
"epoch": 0.79,
"learning_rate": 0.00016033519553072627,
"loss": 2.0151,
"step": 142
},
{
"epoch": 0.8,
"learning_rate": 0.0001600558659217877,
"loss": 1.996,
"step": 143
},
{
"epoch": 0.8,
"learning_rate": 0.00015977653631284918,
"loss": 2.0164,
"step": 144
},
{
"epoch": 0.81,
"learning_rate": 0.00015949720670391061,
"loss": 2.0314,
"step": 145
},
{
"epoch": 0.82,
"learning_rate": 0.00015921787709497208,
"loss": 1.9501,
"step": 146
},
{
"epoch": 0.82,
"learning_rate": 0.00015893854748603352,
"loss": 2.087,
"step": 147
},
{
"epoch": 0.83,
"learning_rate": 0.000158659217877095,
"loss": 2.0262,
"step": 148
},
{
"epoch": 0.83,
"learning_rate": 0.00015837988826815643,
"loss": 2.0765,
"step": 149
},
{
"epoch": 0.84,
"learning_rate": 0.0001581005586592179,
"loss": 2.105,
"step": 150
},
{
"epoch": 0.84,
"learning_rate": 0.00015782122905027933,
"loss": 1.9863,
"step": 151
},
{
"epoch": 0.85,
"learning_rate": 0.0001575418994413408,
"loss": 1.9873,
"step": 152
},
{
"epoch": 0.85,
"learning_rate": 0.00015726256983240224,
"loss": 2.0094,
"step": 153
},
{
"epoch": 0.86,
"learning_rate": 0.00015698324022346368,
"loss": 1.9141,
"step": 154
},
{
"epoch": 0.87,
"learning_rate": 0.00015670391061452514,
"loss": 1.917,
"step": 155
},
{
"epoch": 0.87,
"learning_rate": 0.00015642458100558658,
"loss": 2.109,
"step": 156
},
{
"epoch": 0.88,
"learning_rate": 0.00015614525139664805,
"loss": 1.9799,
"step": 157
},
{
"epoch": 0.88,
"learning_rate": 0.00015586592178770952,
"loss": 1.9571,
"step": 158
},
{
"epoch": 0.89,
"learning_rate": 0.00015558659217877096,
"loss": 1.9931,
"step": 159
},
{
"epoch": 0.89,
"learning_rate": 0.00015530726256983242,
"loss": 2.1004,
"step": 160
},
{
"epoch": 0.9,
"learning_rate": 0.00015502793296089386,
"loss": 2.0385,
"step": 161
},
{
"epoch": 0.91,
"learning_rate": 0.0001547486033519553,
"loss": 1.9751,
"step": 162
},
{
"epoch": 0.91,
"learning_rate": 0.00015446927374301677,
"loss": 2.0544,
"step": 163
},
{
"epoch": 0.92,
"learning_rate": 0.0001541899441340782,
"loss": 2.0069,
"step": 164
},
{
"epoch": 0.92,
"learning_rate": 0.00015391061452513967,
"loss": 1.9576,
"step": 165
},
{
"epoch": 0.93,
"learning_rate": 0.00015363128491620114,
"loss": 1.8991,
"step": 166
},
{
"epoch": 0.93,
"learning_rate": 0.00015335195530726258,
"loss": 1.9336,
"step": 167
},
{
"epoch": 0.94,
"learning_rate": 0.00015307262569832405,
"loss": 1.9736,
"step": 168
},
{
"epoch": 0.94,
"learning_rate": 0.00015279329608938548,
"loss": 1.9702,
"step": 169
},
{
"epoch": 0.95,
"learning_rate": 0.00015251396648044692,
"loss": 1.9055,
"step": 170
},
{
"epoch": 0.96,
"learning_rate": 0.0001522346368715084,
"loss": 2.0503,
"step": 171
},
{
"epoch": 0.96,
"learning_rate": 0.00015195530726256983,
"loss": 2.0039,
"step": 172
},
{
"epoch": 0.97,
"learning_rate": 0.0001516759776536313,
"loss": 1.9406,
"step": 173
},
{
"epoch": 0.97,
"learning_rate": 0.00015139664804469274,
"loss": 2.0525,
"step": 174
},
{
"epoch": 0.98,
"learning_rate": 0.0001511173184357542,
"loss": 1.9234,
"step": 175
},
{
"epoch": 0.98,
"learning_rate": 0.00015083798882681567,
"loss": 1.8614,
"step": 176
},
{
"epoch": 0.99,
"learning_rate": 0.0001505586592178771,
"loss": 1.9616,
"step": 177
},
{
"epoch": 0.99,
"learning_rate": 0.00015027932960893855,
"loss": 1.9509,
"step": 178
},
{
"epoch": 1.0,
"learning_rate": 0.00015000000000000001,
"loss": 1.9592,
"step": 179
},
{
"epoch": 1.01,
"learning_rate": 0.00014972067039106145,
"loss": 1.8991,
"step": 180
},
{
"epoch": 1.01,
"learning_rate": 0.00014944134078212292,
"loss": 1.9127,
"step": 181
},
{
"epoch": 1.02,
"learning_rate": 0.00014916201117318436,
"loss": 1.8982,
"step": 182
},
{
"epoch": 1.02,
"learning_rate": 0.0001488826815642458,
"loss": 1.9534,
"step": 183
},
{
"epoch": 1.03,
"learning_rate": 0.0001486033519553073,
"loss": 1.7794,
"step": 184
},
{
"epoch": 1.03,
"learning_rate": 0.00014832402234636873,
"loss": 1.7958,
"step": 185
},
{
"epoch": 1.04,
"learning_rate": 0.00014804469273743017,
"loss": 1.8282,
"step": 186
},
{
"epoch": 1.04,
"learning_rate": 0.00014776536312849164,
"loss": 2.0423,
"step": 187
},
{
"epoch": 1.05,
"learning_rate": 0.00014748603351955308,
"loss": 1.9282,
"step": 188
},
{
"epoch": 1.06,
"learning_rate": 0.00014720670391061454,
"loss": 1.9072,
"step": 189
},
{
"epoch": 1.06,
"learning_rate": 0.00014692737430167598,
"loss": 1.8665,
"step": 190
},
{
"epoch": 1.07,
"learning_rate": 0.00014664804469273742,
"loss": 1.9021,
"step": 191
},
{
"epoch": 1.07,
"learning_rate": 0.0001463687150837989,
"loss": 1.7308,
"step": 192
},
{
"epoch": 1.08,
"learning_rate": 0.00014608938547486035,
"loss": 1.9165,
"step": 193
},
{
"epoch": 1.08,
"learning_rate": 0.0001458100558659218,
"loss": 1.842,
"step": 194
},
{
"epoch": 1.09,
"learning_rate": 0.00014553072625698326,
"loss": 1.9128,
"step": 195
},
{
"epoch": 1.09,
"learning_rate": 0.0001452513966480447,
"loss": 1.8005,
"step": 196
},
{
"epoch": 1.1,
"learning_rate": 0.00014497206703910614,
"loss": 1.8547,
"step": 197
},
{
"epoch": 1.11,
"learning_rate": 0.0001446927374301676,
"loss": 1.9042,
"step": 198
},
{
"epoch": 1.11,
"learning_rate": 0.00014441340782122905,
"loss": 1.8609,
"step": 199
},
{
"epoch": 1.12,
"learning_rate": 0.0001441340782122905,
"loss": 1.9591,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 716,
"num_train_epochs": 4,
"save_steps": 100,
"total_flos": 1.0290223796256768e+17,
"trial_name": null,
"trial_params": null
}