{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9969788519637461,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 8e-05,
      "loss": 2.1157,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016,
      "loss": 2.2075,
      "step": 4
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00024,
      "loss": 2.1208,
      "step": 6
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00032,
      "loss": 2.2187,
      "step": 8
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0004,
      "loss": 2.2193,
      "step": 10
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.000399961448096413,
      "loss": 2.1887,
      "step": 12
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0003998458072481446,
      "loss": 2.1639,
      "step": 14
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00039965312203694324,
      "loss": 2.1191,
      "step": 16
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0003993834667466256,
      "loss": 2.1907,
      "step": 18
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0003990369453344394,
      "loss": 2.1976,
      "step": 20
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0003986136913909853,
      "loss": 2.163,
      "step": 22
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00039811386808871546,
      "loss": 2.2323,
      "step": 24
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00039753766811902755,
      "loss": 2.1354,
      "step": 26
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00039688531361797835,
      "loss": 2.1643,
      "step": 28
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0003961570560806461,
      "loss": 2.2129,
      "step": 30
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0003953531762641745,
      "loss": 2.1702,
      "step": 32
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0003944739840795353,
      "loss": 2.1848,
      "step": 34
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00039351981847205196,
      "loss": 2.1619,
      "step": 36
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00039249104729072946,
      "loss": 2.1781,
      "step": 38
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0003913880671464418,
      "loss": 2.2253,
      "step": 40
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00039021130325903074,
      "loss": 2.1372,
      "step": 42
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00038896120929337566,
      "loss": 2.1798,
      "step": 44
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00038763826718449685,
      "loss": 2.1592,
      "step": 46
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00038624298695176073,
      "loss": 2.1695,
      "step": 48
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0003847759065022574,
      "loss": 2.2302,
      "step": 50
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00038323759142342724,
      "loss": 2.2537,
      "step": 52
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0003816286347650163,
      "loss": 2.2121,
      "step": 54
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00037994965681044433,
      "loss": 2.1516,
      "step": 56
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0003782013048376736,
      "loss": 2.1707,
      "step": 58
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.000376384252869671,
      "loss": 2.202,
      "step": 60
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00037449920141455944,
      "loss": 2.1967,
      "step": 62
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0003725468771955584,
      "loss": 2.1701,
      "step": 64
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00037052803287081844,
      "loss": 2.1082,
      "step": 66
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00036844344674325733,
      "loss": 2.151,
      "step": 68
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0003662939224605091,
      "loss": 2.2102,
      "step": 70
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0003640802887051027,
      "loss": 2.187,
      "step": 72
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0003618033988749895,
      "loss": 2.2118,
      "step": 74
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00035946413075454146,
      "loss": 2.2232,
      "step": 76
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00035706338617614897,
      "loss": 2.1897,
      "step": 78
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0003546020906725474,
      "loss": 2.2366,
      "step": 80
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0003520811931200062,
      "loss": 2.1615,
      "step": 82
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0003495016653725194,
      "loss": 2.1765,
      "step": 84
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0003468645018871371,
      "loss": 2.2205,
      "step": 86
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00034417071934058377,
      "loss": 2.1722,
      "step": 88
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0003414213562373095,
      "loss": 2.1726,
      "step": 90
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0003386174725091272,
      "loss": 2.1401,
      "step": 92
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0003357601491065884,
      "loss": 2.2254,
      "step": 94
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00033285048758225635,
      "loss": 2.1489,
      "step": 96
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0003298896096660367,
      "loss": 2.2459,
      "step": 98
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0003268786568327291,
      "loss": 2.2103,
      "step": 100
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00032381878986196687,
      "loss": 2.1864,
      "step": 102
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0003207111883907143,
      "loss": 2.1641,
      "step": 104
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00031755705045849464,
      "loss": 2.1346,
      "step": 106
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00031435759204552244,
      "loss": 2.1639,
      "step": 108
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00031111404660392046,
      "loss": 2.2032,
      "step": 110
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00030782766458220014,
      "loss": 2.2095,
      "step": 112
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0003044997129431898,
      "loss": 2.1789,
      "step": 114
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00030113147467559695,
      "loss": 2.1947,
      "step": 116
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00029772424829939106,
      "loss": 2.2011,
      "step": 118
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002942793473651996,
      "loss": 2.1956,
      "step": 120
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00029079809994790937,
      "loss": 2.2043,
      "step": 122
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0002872818481346684,
      "loss": 2.1571,
      "step": 124
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0002837319475074856,
      "loss": 2.2016,
      "step": 126
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0002801497666206282,
      "loss": 2.182,
      "step": 128
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.000276536686473018,
      "loss": 2.1583,
      "step": 130
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00027289409997583,
      "loss": 2.2161,
      "step": 132
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0002692234114154986,
      "loss": 2.2267,
      "step": 134
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00026552603591233873,
      "loss": 2.2418,
      "step": 136
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00026180339887498953,
      "loss": 2.1828,
      "step": 138
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00025805693545089247,
      "loss": 2.1103,
      "step": 140
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00025428808997301485,
      "loss": 2.2022,
      "step": 142
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002504983154030316,
      "loss": 2.1691,
      "step": 144
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00024668907277118114,
      "loss": 2.1787,
      "step": 146
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00024286183061301015,
      "loss": 2.2069,
      "step": 148
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0002390180644032257,
      "loss": 2.2609,
      "step": 150
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00023515925598687094,
      "loss": 2.2089,
      "step": 152
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0002312868930080462,
      "loss": 2.1889,
      "step": 154
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00022740246833639365,
      "loss": 2.2167,
      "step": 156
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00022350747949156756,
      "loss": 2.1636,
      "step": 158
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0002196034280659122,
      "loss": 2.2215,
      "step": 160
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.000215691819145569,
      "loss": 2.1903,
      "step": 162
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0002117741607302378,
      "loss": 2.2131,
      "step": 164
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00020785196315181376,
      "loss": 2.6465,
      "step": 166
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00020392673849212566,
      "loss": 2.18,
      "step": 168
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0002,
      "loss": 2.0911,
      "step": 170
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00019607326150787436,
      "loss": 2.1044,
      "step": 172
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00019214803684818634,
      "loss": 2.1266,
      "step": 174
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00018822583926976218,
      "loss": 2.1559,
      "step": 176
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00018430818085443104,
      "loss": 2.1522,
      "step": 178
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00018039657193408788,
      "loss": 2.1535,
      "step": 180
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00017649252050843252,
      "loss": 2.2344,
      "step": 182
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00017259753166360642,
      "loss": 2.121,
      "step": 184
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00016871310699195379,
      "loss": 2.1426,
      "step": 186
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0001648407440131291,
      "loss": 2.1322,
      "step": 188
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00016098193559677438,
      "loss": 2.1529,
      "step": 190
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0001571381693869899,
      "loss": 2.18,
      "step": 192
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.000153310927228819,
      "loss": 2.1437,
      "step": 194
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0001495016845969684,
      "loss": 2.1725,
      "step": 196
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00014571191002698517,
      "loss": 2.1208,
      "step": 198
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00014194306454910757,
      "loss": 2.1427,
      "step": 200
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013819660112501054,
      "loss": 2.1056,
      "step": 202
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.00013447396408766132,
      "loss": 2.1255,
      "step": 204
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00013077658858450138,
      "loss": 2.1658,
      "step": 206
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00012710590002417007,
      "loss": 2.1443,
      "step": 208
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00012346331352698205,
      "loss": 2.1308,
      "step": 210
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00011985023337937184,
      "loss": 2.1563,
      "step": 212
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00011626805249251445,
      "loss": 2.1129,
      "step": 214
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00011271815186533155,
      "loss": 2.1279,
      "step": 216
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00010920190005209065,
      "loss": 2.0998,
      "step": 218
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00010572065263480046,
      "loss": 2.1541,
      "step": 220
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00010227575170060909,
      "loss": 2.162,
      "step": 222
    },
    {
      "epoch": 1.36,
      "learning_rate": 9.886852532440312e-05,
      "loss": 2.1697,
      "step": 224
    },
    {
      "epoch": 1.37,
      "learning_rate": 9.550028705681025e-05,
      "loss": 2.1428,
      "step": 226
    },
    {
      "epoch": 1.38,
      "learning_rate": 9.217233541779995e-05,
      "loss": 2.0846,
      "step": 228
    },
    {
      "epoch": 1.39,
      "learning_rate": 8.888595339607961e-05,
      "loss": 2.136,
      "step": 230
    },
    {
      "epoch": 1.4,
      "learning_rate": 8.564240795447758e-05,
      "loss": 2.2081,
      "step": 232
    },
    {
      "epoch": 1.42,
      "learning_rate": 8.24429495415054e-05,
      "loss": 2.097,
      "step": 234
    },
    {
      "epoch": 1.43,
      "learning_rate": 7.928881160928572e-05,
      "loss": 2.1502,
      "step": 236
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.618121013803319e-05,
      "loss": 2.1683,
      "step": 238
    },
    {
      "epoch": 1.45,
      "learning_rate": 7.312134316727093e-05,
      "loss": 2.1311,
      "step": 240
    },
    {
      "epoch": 1.47,
      "learning_rate": 7.011039033396329e-05,
      "loss": 2.15,
      "step": 242
    },
    {
      "epoch": 1.48,
      "learning_rate": 6.714951241774372e-05,
      "loss": 2.1618,
      "step": 244
    },
    {
      "epoch": 1.49,
      "learning_rate": 6.423985089341164e-05,
      "loss": 2.1256,
      "step": 246
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.138252749087286e-05,
      "loss": 2.194,
      "step": 248
    },
    {
      "epoch": 1.51,
      "learning_rate": 5.857864376269051e-05,
      "loss": 2.1898,
      "step": 250
    },
    {
      "epoch": 1.53,
      "learning_rate": 5.5829280659416237e-05,
      "loss": 2.1065,
      "step": 252
    },
    {
      "epoch": 1.54,
      "learning_rate": 5.313549811286293e-05,
      "loss": 2.0985,
      "step": 254
    },
    {
      "epoch": 1.55,
      "learning_rate": 5.0498334627480615e-05,
      "loss": 2.1658,
      "step": 256
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.7918806879993814e-05,
      "loss": 2.1722,
      "step": 258
    },
    {
      "epoch": 1.57,
      "learning_rate": 4.53979093274526e-05,
      "loss": 2.083,
      "step": 260
    },
    {
      "epoch": 1.59,
      "learning_rate": 4.293661382385106e-05,
      "loss": 2.1665,
      "step": 262
    },
    {
      "epoch": 1.6,
      "learning_rate": 4.05358692454586e-05,
      "loss": 2.133,
      "step": 264
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.819660112501053e-05,
      "loss": 2.1471,
      "step": 266
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.591971129489728e-05,
      "loss": 2.1791,
      "step": 268
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.370607753949093e-05,
      "loss": 2.072,
      "step": 270
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.1556553256742713e-05,
      "loss": 2.1349,
      "step": 272
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.9471967129181565e-05,
      "loss": 2.1705,
      "step": 274
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.7453122804441634e-05,
      "loss": 2.1388,
      "step": 276
    },
    {
      "epoch": 1.68,
      "learning_rate": 2.5500798585440567e-05,
      "loss": 2.1586,
      "step": 278
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.3615747130329013e-05,
      "loss": 2.1193,
      "step": 280
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.1798695162326442e-05,
      "loss": 2.0693,
      "step": 282
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.0050343189555743e-05,
      "loss": 2.1118,
      "step": 284
    },
    {
      "epoch": 1.73,
      "learning_rate": 1.8371365234983727e-05,
      "loss": 2.1395,
      "step": 286
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.676240857657283e-05,
      "loss": 2.1948,
      "step": 288
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.5224093497742653e-05,
      "loss": 2.1251,
      "step": 290
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.3757013048239286e-05,
      "loss": 2.0808,
      "step": 292
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.236173281550319e-05,
      "loss": 2.1342,
      "step": 294
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.1038790706624391e-05,
      "loss": 2.0986,
      "step": 296
    },
    {
      "epoch": 1.8,
      "learning_rate": 9.788696740969295e-06,
      "loss": 2.123,
      "step": 298
    },
    {
      "epoch": 1.82,
      "learning_rate": 8.611932853558236e-06,
      "loss": 2.094,
      "step": 300
    },
    {
      "epoch": 1.83,
      "learning_rate": 7.508952709270567e-06,
      "loss": 2.1003,
      "step": 302
    },
    {
      "epoch": 1.84,
      "learning_rate": 6.480181527948048e-06,
      "loss": 2.188,
      "step": 304
    },
    {
      "epoch": 1.85,
      "learning_rate": 5.5260159204646885e-06,
      "loss": 2.1912,
      "step": 306
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.646823735825523e-06,
      "loss": 2.1468,
      "step": 308
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.842943919353914e-06,
      "loss": 2.1543,
      "step": 310
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.114686382021681e-06,
      "loss": 2.0745,
      "step": 312
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.462331880972468e-06,
      "loss": 2.1252,
      "step": 314
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.88613191128455e-06,
      "loss": 2.1826,
      "step": 316
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.3863086090147415e-06,
      "loss": 2.159,
      "step": 318
    },
    {
      "epoch": 1.94,
      "learning_rate": 9.630546655606364e-07,
      "loss": 2.1398,
      "step": 320
    },
    {
      "epoch": 1.95,
      "learning_rate": 6.165332533744073e-07,
      "loss": 2.1637,
      "step": 322
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.468779630568353e-07,
      "loss": 2.0743,
      "step": 324
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.5419275185541982e-07,
      "loss": 2.1162,
      "step": 326
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.855190358703631e-08,
      "loss": 2.1847,
      "step": 328
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0,
      "loss": 2.092,
      "step": 330
    }
  ],
  "max_steps": 330,
  "num_train_epochs": 2,
  "total_flos": 9.774541129058877e+17,
  "trial_name": null,
  "trial_params": null
}