|
{
  "best_metric": 0.03369523584842682,
  "best_model_checkpoint": "./windturbine_outputs/checkpoint-394",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 985,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.050761421319796954,
      "grad_norm": 46.4831657409668,
      "learning_rate": 1.9796954314720812e-05,
      "loss": 1.233,
      "step": 10
    },
    {
      "epoch": 0.10152284263959391,
      "grad_norm": 181.1588592529297,
      "learning_rate": 1.9593908629441626e-05,
      "loss": 0.7105,
      "step": 20
    },
    {
      "epoch": 0.15228426395939088,
      "grad_norm": 23.9494686126709,
      "learning_rate": 1.939086294416244e-05,
      "loss": 0.499,
      "step": 30
    },
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 51.11117935180664,
      "learning_rate": 1.918781725888325e-05,
      "loss": 0.3099,
      "step": 40
    },
    {
      "epoch": 0.25380710659898476,
      "grad_norm": 47.91529083251953,
      "learning_rate": 1.8984771573604063e-05,
      "loss": 0.3527,
      "step": 50
    },
    {
      "epoch": 0.30456852791878175,
      "grad_norm": 655.8473510742188,
      "learning_rate": 1.8781725888324877e-05,
      "loss": 0.4137,
      "step": 60
    },
    {
      "epoch": 0.3553299492385787,
      "grad_norm": 1.7267569303512573,
      "learning_rate": 1.8578680203045687e-05,
      "loss": 0.3445,
      "step": 70
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 11.01191234588623,
      "learning_rate": 1.8375634517766498e-05,
      "loss": 0.3102,
      "step": 80
    },
    {
      "epoch": 0.45685279187817257,
      "grad_norm": 169.2871551513672,
      "learning_rate": 1.817258883248731e-05,
      "loss": 0.3288,
      "step": 90
    },
    {
      "epoch": 0.5076142131979695,
      "grad_norm": 31.255569458007812,
      "learning_rate": 1.7969543147208125e-05,
      "loss": 0.1398,
      "step": 100
    },
    {
      "epoch": 0.5583756345177665,
      "grad_norm": 33.036746978759766,
      "learning_rate": 1.7766497461928935e-05,
      "loss": 0.1228,
      "step": 110
    },
    {
      "epoch": 0.6091370558375635,
      "grad_norm": 0.6117672920227051,
      "learning_rate": 1.7563451776649745e-05,
      "loss": 0.1126,
      "step": 120
    },
    {
      "epoch": 0.6598984771573604,
      "grad_norm": 1.1897255182266235,
      "learning_rate": 1.736040609137056e-05,
      "loss": 0.0152,
      "step": 130
    },
    {
      "epoch": 0.7106598984771574,
      "grad_norm": 438.8088073730469,
      "learning_rate": 1.7157360406091373e-05,
      "loss": 0.279,
      "step": 140
    },
    {
      "epoch": 0.7614213197969543,
      "grad_norm": 492.2366638183594,
      "learning_rate": 1.6954314720812183e-05,
      "loss": 0.5028,
      "step": 150
    },
    {
      "epoch": 0.8121827411167513,
      "grad_norm": 0.6856859922409058,
      "learning_rate": 1.6751269035532997e-05,
      "loss": 0.0962,
      "step": 160
    },
    {
      "epoch": 0.8629441624365483,
      "grad_norm": 46.26337814331055,
      "learning_rate": 1.654822335025381e-05,
      "loss": 0.0995,
      "step": 170
    },
    {
      "epoch": 0.9137055837563451,
      "grad_norm": 52.820518493652344,
      "learning_rate": 1.634517766497462e-05,
      "loss": 0.2824,
      "step": 180
    },
    {
      "epoch": 0.9644670050761421,
      "grad_norm": 0.23557256162166595,
      "learning_rate": 1.614213197969543e-05,
      "loss": 0.0604,
      "step": 190
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.989247311827957,
      "eval_loss": 0.0458565279841423,
      "eval_runtime": 18.211,
      "eval_samples_per_second": 15.32,
      "eval_steps_per_second": 1.922,
      "step": 197
    },
    {
      "epoch": 1.015228426395939,
      "grad_norm": 4.585972309112549,
      "learning_rate": 1.5939086294416245e-05,
      "loss": 0.1812,
      "step": 200
    },
    {
      "epoch": 1.0659898477157361,
      "grad_norm": 0.8356128931045532,
      "learning_rate": 1.573604060913706e-05,
      "loss": 0.0418,
      "step": 210
    },
    {
      "epoch": 1.116751269035533,
      "grad_norm": 249.66070556640625,
      "learning_rate": 1.553299492385787e-05,
      "loss": 0.1217,
      "step": 220
    },
    {
      "epoch": 1.16751269035533,
      "grad_norm": 131.45016479492188,
      "learning_rate": 1.5329949238578682e-05,
      "loss": 0.2738,
      "step": 230
    },
    {
      "epoch": 1.218274111675127,
      "grad_norm": 0.0076106940396130085,
      "learning_rate": 1.5126903553299494e-05,
      "loss": 0.0528,
      "step": 240
    },
    {
      "epoch": 1.2690355329949239,
      "grad_norm": 191.85903930664062,
      "learning_rate": 1.4923857868020306e-05,
      "loss": 0.096,
      "step": 250
    },
    {
      "epoch": 1.3197969543147208,
      "grad_norm": 43.89326477050781,
      "learning_rate": 1.4720812182741118e-05,
      "loss": 0.1814,
      "step": 260
    },
    {
      "epoch": 1.3705583756345177,
      "grad_norm": 0.0087745301425457,
      "learning_rate": 1.4517766497461929e-05,
      "loss": 0.1263,
      "step": 270
    },
    {
      "epoch": 1.4213197969543148,
      "grad_norm": 342.1986083984375,
      "learning_rate": 1.4314720812182742e-05,
      "loss": 0.3518,
      "step": 280
    },
    {
      "epoch": 1.4720812182741116,
      "grad_norm": 0.011494699865579605,
      "learning_rate": 1.4111675126903554e-05,
      "loss": 0.0469,
      "step": 290
    },
    {
      "epoch": 1.5228426395939088,
      "grad_norm": 0.02110651694238186,
      "learning_rate": 1.3908629441624366e-05,
      "loss": 0.0432,
      "step": 300
    },
    {
      "epoch": 1.5736040609137056,
      "grad_norm": 16.259416580200195,
      "learning_rate": 1.3705583756345178e-05,
      "loss": 0.0037,
      "step": 310
    },
    {
      "epoch": 1.6243654822335025,
      "grad_norm": 1.5435930490493774,
      "learning_rate": 1.3502538071065992e-05,
      "loss": 0.0055,
      "step": 320
    },
    {
      "epoch": 1.6751269035532994,
      "grad_norm": 0.022236082702875137,
      "learning_rate": 1.3299492385786802e-05,
      "loss": 0.0251,
      "step": 330
    },
    {
      "epoch": 1.7258883248730963,
      "grad_norm": 0.002686608349904418,
      "learning_rate": 1.3096446700507614e-05,
      "loss": 0.0089,
      "step": 340
    },
    {
      "epoch": 1.7766497461928934,
      "grad_norm": 0.48235443234443665,
      "learning_rate": 1.2893401015228428e-05,
      "loss": 0.067,
      "step": 350
    },
    {
      "epoch": 1.8274111675126905,
      "grad_norm": 0.0026509715244174004,
      "learning_rate": 1.269035532994924e-05,
      "loss": 0.0734,
      "step": 360
    },
    {
      "epoch": 1.8781725888324874,
      "grad_norm": 0.0006726986030116677,
      "learning_rate": 1.2487309644670052e-05,
      "loss": 0.1744,
      "step": 370
    },
    {
      "epoch": 1.9289340101522843,
      "grad_norm": 0.00010891614510910586,
      "learning_rate": 1.2284263959390864e-05,
      "loss": 0.0148,
      "step": 380
    },
    {
      "epoch": 1.9796954314720812,
      "grad_norm": 0.12294577062129974,
      "learning_rate": 1.2081218274111678e-05,
      "loss": 0.1309,
      "step": 390
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.985663082437276,
      "eval_loss": 0.03369523584842682,
      "eval_runtime": 17.3849,
      "eval_samples_per_second": 16.048,
      "eval_steps_per_second": 2.013,
      "step": 394
    },
    {
      "epoch": 2.030456852791878,
      "grad_norm": 7.998709201812744,
      "learning_rate": 1.1878172588832488e-05,
      "loss": 0.2421,
      "step": 400
    },
    {
      "epoch": 2.081218274111675,
      "grad_norm": 24.342329025268555,
      "learning_rate": 1.16751269035533e-05,
      "loss": 0.2092,
      "step": 410
    },
    {
      "epoch": 2.1319796954314723,
      "grad_norm": 0.0385918989777565,
      "learning_rate": 1.1472081218274113e-05,
      "loss": 0.1426,
      "step": 420
    },
    {
      "epoch": 2.182741116751269,
      "grad_norm": 0.007073443848639727,
      "learning_rate": 1.1269035532994925e-05,
      "loss": 0.1668,
      "step": 430
    },
    {
      "epoch": 2.233502538071066,
      "grad_norm": 0.11695539951324463,
      "learning_rate": 1.1065989847715737e-05,
      "loss": 0.0358,
      "step": 440
    },
    {
      "epoch": 2.284263959390863,
      "grad_norm": 10.85909652709961,
      "learning_rate": 1.0862944162436548e-05,
      "loss": 0.0129,
      "step": 450
    },
    {
      "epoch": 2.33502538071066,
      "grad_norm": 83.56989288330078,
      "learning_rate": 1.0659898477157361e-05,
      "loss": 0.3374,
      "step": 460
    },
    {
      "epoch": 2.3857868020304567,
      "grad_norm": 0.001134849968366325,
      "learning_rate": 1.0456852791878173e-05,
      "loss": 0.2406,
      "step": 470
    },
    {
      "epoch": 2.436548223350254,
      "grad_norm": 0.303134560585022,
      "learning_rate": 1.0253807106598985e-05,
      "loss": 0.0009,
      "step": 480
    },
    {
      "epoch": 2.487309644670051,
      "grad_norm": 1.2032800912857056,
      "learning_rate": 1.0050761421319797e-05,
      "loss": 0.0209,
      "step": 490
    },
    {
      "epoch": 2.5380710659898478,
      "grad_norm": 0.6638129949569702,
      "learning_rate": 9.84771573604061e-06,
      "loss": 0.0677,
      "step": 500
    },
    {
      "epoch": 2.5888324873096447,
      "grad_norm": 0.6139811277389526,
      "learning_rate": 9.644670050761421e-06,
      "loss": 0.2713,
      "step": 510
    },
    {
      "epoch": 2.6395939086294415,
      "grad_norm": 0.042041704058647156,
      "learning_rate": 9.441624365482235e-06,
      "loss": 0.0177,
      "step": 520
    },
    {
      "epoch": 2.6903553299492384,
      "grad_norm": 3578.884765625,
      "learning_rate": 9.238578680203047e-06,
      "loss": 0.1765,
      "step": 530
    },
    {
      "epoch": 2.7411167512690353,
      "grad_norm": 0.800250232219696,
      "learning_rate": 9.035532994923859e-06,
      "loss": 0.0518,
      "step": 540
    },
    {
      "epoch": 2.7918781725888326,
      "grad_norm": 0.12552772462368011,
      "learning_rate": 8.832487309644671e-06,
      "loss": 0.1098,
      "step": 550
    },
    {
      "epoch": 2.8426395939086295,
      "grad_norm": 0.003844466060400009,
      "learning_rate": 8.629441624365483e-06,
      "loss": 0.1961,
      "step": 560
    },
    {
      "epoch": 2.8934010152284264,
      "grad_norm": 0.0007168629672378302,
      "learning_rate": 8.426395939086295e-06,
      "loss": 0.0309,
      "step": 570
    },
    {
      "epoch": 2.9441624365482233,
      "grad_norm": 0.20322203636169434,
      "learning_rate": 8.223350253807107e-06,
      "loss": 0.0921,
      "step": 580
    },
    {
      "epoch": 2.99492385786802,
      "grad_norm": 0.010694732889533043,
      "learning_rate": 8.020304568527919e-06,
      "loss": 0.3207,
      "step": 590
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.978494623655914,
      "eval_loss": 0.12610773742198944,
      "eval_runtime": 16.6863,
      "eval_samples_per_second": 16.72,
      "eval_steps_per_second": 2.098,
      "step": 591
    },
    {
      "epoch": 3.045685279187817,
      "grad_norm": 0.023461902514100075,
      "learning_rate": 7.817258883248731e-06,
      "loss": 0.2357,
      "step": 600
    },
    {
      "epoch": 3.0964467005076144,
      "grad_norm": 0.19518044590950012,
      "learning_rate": 7.614213197969543e-06,
      "loss": 0.0022,
      "step": 610
    },
    {
      "epoch": 3.1472081218274113,
      "grad_norm": 0.00014897708024363965,
      "learning_rate": 7.411167512690356e-06,
      "loss": 0.0005,
      "step": 620
    },
    {
      "epoch": 3.197969543147208,
      "grad_norm": 294.190673828125,
      "learning_rate": 7.208121827411169e-06,
      "loss": 0.0252,
      "step": 630
    },
    {
      "epoch": 3.248730964467005,
      "grad_norm": 0.00014258331793826073,
      "learning_rate": 7.0050761421319806e-06,
      "loss": 0.0231,
      "step": 640
    },
    {
      "epoch": 3.299492385786802,
      "grad_norm": 0.006024135742336512,
      "learning_rate": 6.8020304568527926e-06,
      "loss": 0.1952,
      "step": 650
    },
    {
      "epoch": 3.350253807106599,
      "grad_norm": 215.45094299316406,
      "learning_rate": 6.5989847715736045e-06,
      "loss": 0.175,
      "step": 660
    },
    {
      "epoch": 3.401015228426396,
      "grad_norm": 0.15660379827022552,
      "learning_rate": 6.395939086294417e-06,
      "loss": 0.113,
      "step": 670
    },
    {
      "epoch": 3.451776649746193,
      "grad_norm": 0.0003182363579981029,
      "learning_rate": 6.1928934010152285e-06,
      "loss": 0.3483,
      "step": 680
    },
    {
      "epoch": 3.50253807106599,
      "grad_norm": 0.0007056689355522394,
      "learning_rate": 5.989847715736041e-06,
      "loss": 0.2458,
      "step": 690
    },
    {
      "epoch": 3.553299492385787,
      "grad_norm": 313.6507263183594,
      "learning_rate": 5.7868020304568525e-06,
      "loss": 0.1121,
      "step": 700
    },
    {
      "epoch": 3.6040609137055837,
      "grad_norm": 0.08964172750711441,
      "learning_rate": 5.583756345177665e-06,
      "loss": 0.0019,
      "step": 710
    },
    {
      "epoch": 3.6548223350253806,
      "grad_norm": 0.05380258709192276,
      "learning_rate": 5.380710659898477e-06,
      "loss": 0.0163,
      "step": 720
    },
    {
      "epoch": 3.7055837563451774,
      "grad_norm": 261.12548828125,
      "learning_rate": 5.17766497461929e-06,
      "loss": 0.1856,
      "step": 730
    },
    {
      "epoch": 3.7563451776649748,
      "grad_norm": 0.021219402551651,
      "learning_rate": 4.974619289340102e-06,
      "loss": 0.1741,
      "step": 740
    },
    {
      "epoch": 3.8071065989847717,
      "grad_norm": 0.006986899767071009,
      "learning_rate": 4.771573604060914e-06,
      "loss": 0.2666,
      "step": 750
    },
    {
      "epoch": 3.8578680203045685,
      "grad_norm": 0.3382449746131897,
      "learning_rate": 4.568527918781726e-06,
      "loss": 0.0107,
      "step": 760
    },
    {
      "epoch": 3.9086294416243654,
      "grad_norm": 6.089676026022062e-05,
      "learning_rate": 4.365482233502538e-06,
      "loss": 0.1188,
      "step": 770
    },
    {
      "epoch": 3.9593908629441623,
      "grad_norm": 0.06970903277397156,
      "learning_rate": 4.162436548223351e-06,
      "loss": 0.2517,
      "step": 780
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.992831541218638,
      "eval_loss": 0.04906105622649193,
      "eval_runtime": 17.0919,
      "eval_samples_per_second": 16.323,
      "eval_steps_per_second": 2.048,
      "step": 788
    },
    {
      "epoch": 4.01015228426396,
      "grad_norm": 0.7618089914321899,
      "learning_rate": 3.959390862944163e-06,
      "loss": 0.1249,
      "step": 790
    },
    {
      "epoch": 4.060913705583756,
      "grad_norm": 0.11002158373594284,
      "learning_rate": 3.756345177664975e-06,
      "loss": 0.0001,
      "step": 800
    },
    {
      "epoch": 4.111675126903553,
      "grad_norm": 0.009345640428364277,
      "learning_rate": 3.5532994923857873e-06,
      "loss": 0.0,
      "step": 810
    },
    {
      "epoch": 4.16243654822335,
      "grad_norm": 0.0008610596996732056,
      "learning_rate": 3.3502538071065993e-06,
      "loss": 0.1807,
      "step": 820
    },
    {
      "epoch": 4.213197969543147,
      "grad_norm": 0.0372774712741375,
      "learning_rate": 3.1472081218274113e-06,
      "loss": 0.0744,
      "step": 830
    },
    {
      "epoch": 4.2639593908629445,
      "grad_norm": 0.026972278952598572,
      "learning_rate": 2.9441624365482237e-06,
      "loss": 0.0216,
      "step": 840
    },
    {
      "epoch": 4.314720812182741,
      "grad_norm": 0.14636941254138947,
      "learning_rate": 2.7411167512690357e-06,
      "loss": 0.0692,
      "step": 850
    },
    {
      "epoch": 4.365482233502538,
      "grad_norm": 225.23846435546875,
      "learning_rate": 2.5380710659898476e-06,
      "loss": 0.2541,
      "step": 860
    },
    {
      "epoch": 4.416243654822335,
      "grad_norm": 0.1878076195716858,
      "learning_rate": 2.33502538071066e-06,
      "loss": 0.0001,
      "step": 870
    },
    {
      "epoch": 4.467005076142132,
      "grad_norm": 0.011137721128761768,
      "learning_rate": 2.1319796954314725e-06,
      "loss": 0.0,
      "step": 880
    },
    {
      "epoch": 4.517766497461929,
      "grad_norm": 8.737370808376e-05,
      "learning_rate": 1.9289340101522844e-06,
      "loss": 0.0004,
      "step": 890
    },
    {
      "epoch": 4.568527918781726,
      "grad_norm": 0.0015903498278930783,
      "learning_rate": 1.7258883248730964e-06,
      "loss": 0.2335,
      "step": 900
    },
    {
      "epoch": 4.619289340101523,
      "grad_norm": 0.02763758972287178,
      "learning_rate": 1.5228426395939088e-06,
      "loss": 0.0003,
      "step": 910
    },
    {
      "epoch": 4.67005076142132,
      "grad_norm": 0.4715072214603424,
      "learning_rate": 1.319796954314721e-06,
      "loss": 0.2327,
      "step": 920
    },
    {
      "epoch": 4.720812182741117,
      "grad_norm": 0.11949143558740616,
      "learning_rate": 1.116751269035533e-06,
      "loss": 0.0734,
      "step": 930
    },
    {
      "epoch": 4.771573604060913,
      "grad_norm": 362.79876708984375,
      "learning_rate": 9.137055837563452e-07,
      "loss": 0.0735,
      "step": 940
    },
    {
      "epoch": 4.822335025380711,
      "grad_norm": 0.0028335938695818186,
      "learning_rate": 7.106598984771574e-07,
      "loss": 0.2299,
      "step": 950
    },
    {
      "epoch": 4.873096446700508,
      "grad_norm": 0.015159722417593002,
      "learning_rate": 5.076142131979696e-07,
      "loss": 0.0003,
      "step": 960
    },
    {
      "epoch": 4.9238578680203045,
      "grad_norm": 0.07444437593221664,
      "learning_rate": 3.0456852791878176e-07,
      "loss": 0.0,
      "step": 970
    },
    {
      "epoch": 4.974619289340102,
      "grad_norm": 0.0017945471918210387,
      "learning_rate": 1.0152284263959391e-07,
      "loss": 0.1549,
      "step": 980
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.996415770609319,
      "eval_loss": 0.04717279225587845,
      "eval_runtime": 17.0835,
      "eval_samples_per_second": 16.332,
      "eval_steps_per_second": 2.049,
      "step": 985
    },
    {
      "epoch": 5.0,
      "step": 985,
      "total_flos": 1.814619221089321e+18,
      "train_loss": 0.15722184067514908,
      "train_runtime": 1696.7849,
      "train_samples_per_second": 4.644,
      "train_steps_per_second": 0.581
    }
  ],
  "logging_steps": 10,
  "max_steps": 985,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.814619221089321e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|