|
06/04/2024 14:41:07 - WARNING - __main__ - Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, 16-bits training: False |
|
06/04/2024 14:41:09 - WARNING - datasets.fingerprint - Parameter 'transform'=<function main.<locals>.transform_images at 0x14b1456889d0> of the transform datasets.arrow_dataset.Dataset.set_format couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed. |
|
06/04/2024 14:41:09 - WARNING - accelerate.utils.other - Detected kernel version 4.18.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher. |
|
{'loss': 0.2213, 'grad_norm': 15.631693840026855, 'learning_rate': 1.808672788934192e-05, 'epoch': 0.64} |
|
{'loss': 0.2007, 'grad_norm': 9.132320404052734, 'learning_rate': 1.7848569142247456e-05, 'epoch': 0.64} |
|
{'loss': 0.2197, 'grad_norm': 2.836796283721924, 'learning_rate': 1.7610410395152993e-05, 'epoch': 0.65} |
|
{'loss': 0.2092, 'grad_norm': 1.20395028591156, 'learning_rate': 1.737225164805853e-05, 'epoch': 0.65} |
|
{'loss': 0.2228, 'grad_norm': 4.42557954788208, 'learning_rate': 1.7134092900964068e-05, 'epoch': 0.66} |
|
{'loss': 0.2159, 'grad_norm': 7.136639595031738, 'learning_rate': 1.6895934153869605e-05, 'epoch': 0.66} |
|
{'loss': 0.1984, 'grad_norm': 12.695110321044922, 'learning_rate': 1.6657775406775142e-05, 'epoch': 0.67} |
|
{'loss': 0.1913, 'grad_norm': 11.841693878173828, 'learning_rate': 1.641961665968068e-05, 'epoch': 0.67} |
|
{'loss': 0.1911, 'grad_norm': 0.7839029431343079, 'learning_rate': 1.6181457912586216e-05, 'epoch': 0.68} |
|
{'loss': 0.2164, 'grad_norm': 6.188957691192627, 'learning_rate': 1.594329916549175e-05, 'epoch': 0.68} |
|
{'loss': 0.2026, 'grad_norm': 11.414396286010742, 'learning_rate': 1.5705140418397287e-05, 'epoch': 0.69} |
|
{'loss': 0.1877, 'grad_norm': 21.34324836730957, 'learning_rate': 1.5466981671302824e-05, 'epoch': 0.69} |
|
{'loss': 0.1889, 'grad_norm': 10.534087181091309, 'learning_rate': 1.5228822924208361e-05, 'epoch': 0.7} |
|
{'loss': 0.1902, 'grad_norm': 0.4472333788871765, 'learning_rate': 1.4990664177113897e-05, 'epoch': 0.7} |
|
{'loss': 0.1929, 'grad_norm': 27.742639541625977, 'learning_rate': 1.4752505430019434e-05, 'epoch': 0.7} |
|
{'loss': 0.1754, 'grad_norm': 12.095074653625488, 'learning_rate': 1.451434668292497e-05, 'epoch': 0.71} |
|
{'loss': 0.1866, 'grad_norm': 10.153932571411133, 'learning_rate': 1.4276187935830506e-05, 'epoch': 0.71} |
|
{'loss': 0.2004, 'grad_norm': 9.065908432006836, 'learning_rate': 1.4038029188736043e-05, 'epoch': 0.72} |
|
{'loss': 0.175, 'grad_norm': 1.7107542753219604, 'learning_rate': 1.3799870441641582e-05, 'epoch': 0.72} |
|
{'loss': 0.1865, 'grad_norm': 1.4907644987106323, 'learning_rate': 1.356171169454712e-05, 'epoch': 0.73} |
|
{'loss': 0.1694, 'grad_norm': 0.11667291074991226, 'learning_rate': 1.3323552947452655e-05, 'epoch': 0.73} |
|
{'loss': 0.1821, 'grad_norm': 29.062976837158203, 'learning_rate': 1.3085394200358192e-05, 'epoch': 0.74} |
|
{'loss': 0.1692, 'grad_norm': 7.0561113357543945, 'learning_rate': 1.2847235453263729e-05, 'epoch': 0.74} |
|
{'loss': 0.1711, 'grad_norm': 7.916496753692627, 'learning_rate': 1.2609076706169264e-05, 'epoch': 0.75} |
|
{'loss': 0.1795, 'grad_norm': 0.009064608253538609, 'learning_rate': 1.2370917959074802e-05, 'epoch': 0.75} |
|
{'loss': 0.1732, 'grad_norm': 16.94624900817871, 'learning_rate': 1.2132759211980339e-05, 'epoch': 0.76} |
|
{'loss': 0.1599, 'grad_norm': 24.09770965576172, 'learning_rate': 1.1894600464885874e-05, 'epoch': 0.76} |
|
{'loss': 0.1702, 'grad_norm': 17.340219497680664, 'learning_rate': 1.1656441717791411e-05, 'epoch': 0.77} |
|
{'loss': 0.1679, 'grad_norm': 0.015018216334283352, 'learning_rate': 1.1418282970696948e-05, 'epoch': 0.77} |
|
{'loss': 0.1832, 'grad_norm': 16.005643844604492, 'learning_rate': 1.1180124223602485e-05, 'epoch': 0.78} |
|
{'loss': 0.1505, 'grad_norm': 0.0013979446375742555, 'learning_rate': 1.0941965476508023e-05, 'epoch': 0.78} |
|
{'loss': 0.1663, 'grad_norm': 1.049574375152588, 'learning_rate': 1.0703806729413558e-05, 'epoch': 0.79} |
|
{'loss': 0.1561, 'grad_norm': 5.492803573608398, 'learning_rate': 1.0465647982319095e-05, 'epoch': 0.79} |
|
{'loss': 0.1876, 'grad_norm': 14.530741691589355, 'learning_rate': 1.022748923522463e-05, 'epoch': 0.8} |
|
{'loss': 0.1524, 'grad_norm': 0.0616171695291996, 'learning_rate': 9.98933048813017e-06, 'epoch': 0.8} |
|
{'loss': 0.1588, 'grad_norm': 15.610015869140625, 'learning_rate': 9.751171741035706e-06, 'epoch': 0.8} |
|
{'loss': 0.1779, 'grad_norm': 7.8352885246276855, 'learning_rate': 9.513012993941242e-06, 'epoch': 0.81} |
|
{'loss': 0.1436, 'grad_norm': 21.532123565673828, 'learning_rate': 9.274854246846779e-06, 'epoch': 0.81} |
|
{'loss': 0.1472, 'grad_norm': 0.025519462302327156, 'learning_rate': 9.036695499752314e-06, 'epoch': 0.82} |
|
{'loss': 0.1525, 'grad_norm': 0.09057486802339554, 'learning_rate': 8.798536752657852e-06, 'epoch': 0.82} |
|
{'loss': 0.1614, 'grad_norm': 0.9066371917724609, 'learning_rate': 8.560378005563389e-06, 'epoch': 0.83} |
|
{'loss': 0.1492, 'grad_norm': 2.612293004989624, 'learning_rate': 8.322219258468926e-06, 'epoch': 0.83} |
|
{'loss': 0.1518, 'grad_norm': 6.420555114746094, 'learning_rate': 8.084060511374463e-06, 'epoch': 0.84} |
|
{'loss': 0.1581, 'grad_norm': 0.04058153182268143, 'learning_rate': 7.845901764279998e-06, 'epoch': 0.84} |
|
{'loss': 0.166, 'grad_norm': 17.142908096313477, 'learning_rate': 7.6077430171855355e-06, 'epoch': 0.85} |
|
{'loss': 0.1613, 'grad_norm': 17.988386154174805, 'learning_rate': 7.369584270091072e-06, 'epoch': 0.85} |
|
{'loss': 0.1448, 'grad_norm': 0.28535395860671997, 'learning_rate': 7.131425522996608e-06, 'epoch': 0.86} |
|
{'loss': 0.1397, 'grad_norm': 0.02261945605278015, 'learning_rate': 6.893266775902146e-06, 'epoch': 0.86} |
|
{'loss': 0.146, 'grad_norm': 15.836788177490234, 'learning_rate': 6.655108028807683e-06, 'epoch': 0.87} |
|
{'loss': 0.1405, 'grad_norm': 0.6049064993858337, 'learning_rate': 6.416949281713219e-06, 'epoch': 0.87} |
|
{'loss': 0.1466, 'grad_norm': 0.08131851255893707, 'learning_rate': 6.178790534618756e-06, 'epoch': 0.88} |
|
{'loss': 0.1398, 'grad_norm': 16.24254608154297, 'learning_rate': 5.940631787524292e-06, 'epoch': 0.88} |
|
{'loss': 0.124, 'grad_norm': 8.061177504714578e-05, 'learning_rate': 5.702473040429829e-06, 'epoch': 0.89} |
|
{'loss': 0.1448, 'grad_norm': 2.3429534435272217, 'learning_rate': 5.464314293335366e-06, 'epoch': 0.89} |
|
{'loss': 0.1304, 'grad_norm': 21.22195053100586, 'learning_rate': 5.2261555462409025e-06, 'epoch': 0.9} |
|
{'loss': 0.1211, 'grad_norm': 22.701601028442383, 'learning_rate': 4.9879967991464396e-06, 'epoch': 0.9} |
|
{'loss': 0.1514, 'grad_norm': 15.280872344970703, 'learning_rate': 4.749838052051976e-06, 'epoch': 0.91} |
|
{'loss': 0.1115, 'grad_norm': 0.01960950717329979, 'learning_rate': 4.511679304957513e-06, 'epoch': 0.91} |
|
{'loss': 0.1571, 'grad_norm': 0.0017501560505479574, 'learning_rate': 4.273520557863049e-06, 'epoch': 0.91} |
|
{'loss': 0.1499, 'grad_norm': 0.05184149742126465, 'learning_rate': 4.035361810768586e-06, 'epoch': 0.92} |
|
{'loss': 0.1272, 'grad_norm': 6.8865180015563965, 'learning_rate': 3.7972030636741226e-06, 'epoch': 0.92} |
|
{'loss': 0.1263, 'grad_norm': 0.10639504343271255, 'learning_rate': 3.5590443165796593e-06, 'epoch': 0.93} |
|
{'loss': 0.133, 'grad_norm': 0.9005939960479736, 'learning_rate': 3.3208855694851965e-06, 'epoch': 0.93} |
|
{'loss': 0.1325, 'grad_norm': 1.1748387813568115, 'learning_rate': 3.082726822390733e-06, 'epoch': 0.94} |
|
{'loss': 0.1319, 'grad_norm': 28.960805892944336, 'learning_rate': 2.8445680752962694e-06, 'epoch': 0.94} |
|
{'loss': 0.1531, 'grad_norm': 8.118181228637695, 'learning_rate': 2.606409328201806e-06, 'epoch': 0.95} |
|
{'loss': 0.1275, 'grad_norm': 0.004353045951575041, 'learning_rate': 2.3682505811073433e-06, 'epoch': 0.95} |
|
{'loss': 0.1428, 'grad_norm': 2.1794090270996094, 'learning_rate': 2.1300918340128795e-06, 'epoch': 0.96} |
|
{'loss': 0.1135, 'grad_norm': 8.74696159362793, 'learning_rate': 1.8919330869184164e-06, 'epoch': 0.96} |
|
{'loss': 0.1391, 'grad_norm': 31.47783851623535, 'learning_rate': 1.6537743398239533e-06, 'epoch': 0.97} |
|
{'loss': 0.12, 'grad_norm': 1.8234331607818604, 'learning_rate': 1.4156155927294898e-06, 'epoch': 0.97} |
|
{'loss': 0.1253, 'grad_norm': 5.736881732940674, 'learning_rate': 1.1774568456350265e-06, 'epoch': 0.98} |
|
{'loss': 0.1216, 'grad_norm': 0.919281005859375, 'learning_rate': 9.392980985405633e-07, 'epoch': 0.98} |
|
{'loss': 0.1245, 'grad_norm': 6.265872001647949, 'learning_rate': 7.011393514460999e-07, 'epoch': 0.99} |
|
{'loss': 0.1298, 'grad_norm': 19.050594329833984, 'learning_rate': 4.629806043516366e-07, 'epoch': 0.99} |
|
{'loss': 0.1177, 'grad_norm': 9.125242233276367, 'learning_rate': 2.2482185725717335e-07, 'epoch': 1.0} |
|
{'train_runtime': 27901.1518, 'train_samples_per_second': 30.098, 'train_steps_per_second': 3.762, 'train_loss': 0.05896880445613715, 'epoch': 1.0} |
|
***** train metrics ***** |
|
epoch = 1.0 |
|
total_flos = 140683255GF |
|
train_loss = 0.059 |
|
train_runtime = 7:45:01.15 |
|
train_samples_per_second = 30.098 |
|
train_steps_per_second = 3.762 |
|
|