|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 21000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.7975520491600037,
      "eval_mae": 0.7266795039176941,
      "eval_mse": 0.7975521087646484,
      "eval_rmse": 0.8930577039718628,
      "eval_runtime": 2.2756,
      "eval_samples_per_second": 184.568,
      "eval_steps_per_second": 23.291,
      "step": 420
    },
    {
      "epoch": 1.19,
      "learning_rate": 9.761904761904762e-06,
      "loss": 1.111,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.609792947769165,
      "eval_mae": 0.6158387660980225,
      "eval_mse": 0.609792947769165,
      "eval_rmse": 0.7808924317359924,
      "eval_runtime": 1.9303,
      "eval_samples_per_second": 217.582,
      "eval_steps_per_second": 27.457,
      "step": 840
    },
    {
      "epoch": 2.38,
      "learning_rate": 9.523809523809525e-06,
      "loss": 0.4808,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6106507182121277,
      "eval_mae": 0.5975062847137451,
      "eval_mse": 0.6106507778167725,
      "eval_rmse": 0.7814414501190186,
      "eval_runtime": 2.7602,
      "eval_samples_per_second": 152.163,
      "eval_steps_per_second": 19.202,
      "step": 1260
    },
    {
      "epoch": 3.57,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.3196,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5748845338821411,
      "eval_mae": 0.584411084651947,
      "eval_mse": 0.5748845934867859,
      "eval_rmse": 0.7582114338874817,
      "eval_runtime": 1.9617,
      "eval_samples_per_second": 214.099,
      "eval_steps_per_second": 27.017,
      "step": 1680
    },
    {
      "epoch": 4.76,
      "learning_rate": 9.047619047619049e-06,
      "loss": 0.2357,
      "step": 2000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.6165366172790527,
      "eval_mae": 0.5906195640563965,
      "eval_mse": 0.6165366172790527,
      "eval_rmse": 0.785198450088501,
      "eval_runtime": 2.7689,
      "eval_samples_per_second": 151.687,
      "eval_steps_per_second": 19.141,
      "step": 2100
    },
    {
      "epoch": 5.95,
      "learning_rate": 8.80952380952381e-06,
      "loss": 0.1909,
      "step": 2500
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.5579051375389099,
      "eval_mae": 0.5814845561981201,
      "eval_mse": 0.5579050779342651,
      "eval_rmse": 0.7469304203987122,
      "eval_runtime": 1.2975,
      "eval_samples_per_second": 323.7,
      "eval_steps_per_second": 40.848,
      "step": 2520
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.5374110341072083,
      "eval_mae": 0.5643972754478455,
      "eval_mse": 0.5374110341072083,
      "eval_rmse": 0.7330832481384277,
      "eval_runtime": 2.8385,
      "eval_samples_per_second": 147.967,
      "eval_steps_per_second": 18.672,
      "step": 2940
    },
    {
      "epoch": 7.14,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.157,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.5414829850196838,
      "eval_mae": 0.5653696060180664,
      "eval_mse": 0.5414829254150391,
      "eval_rmse": 0.735855221748352,
      "eval_runtime": 2.0614,
      "eval_samples_per_second": 203.747,
      "eval_steps_per_second": 25.711,
      "step": 3360
    },
    {
      "epoch": 8.33,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.1334,
      "step": 3500
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.5609115958213806,
      "eval_mae": 0.5715270042419434,
      "eval_mse": 0.5609115362167358,
      "eval_rmse": 0.7489402890205383,
      "eval_runtime": 1.9712,
      "eval_samples_per_second": 213.066,
      "eval_steps_per_second": 26.887,
      "step": 3780
    },
    {
      "epoch": 9.52,
      "learning_rate": 8.095238095238097e-06,
      "loss": 0.1199,
      "step": 4000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.5141286253929138,
      "eval_mae": 0.5450649261474609,
      "eval_mse": 0.5141286253929138,
      "eval_rmse": 0.7170276045799255,
      "eval_runtime": 1.9528,
      "eval_samples_per_second": 215.081,
      "eval_steps_per_second": 27.141,
      "step": 4200
    },
    {
      "epoch": 10.71,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.1112,
      "step": 4500
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.5070027112960815,
      "eval_mae": 0.5421043634414673,
      "eval_mse": 0.5070027112960815,
      "eval_rmse": 0.7120411992073059,
      "eval_runtime": 1.3515,
      "eval_samples_per_second": 310.772,
      "eval_steps_per_second": 39.216,
      "step": 4620
    },
    {
      "epoch": 11.9,
      "learning_rate": 7.61904761904762e-06,
      "loss": 0.0983,
      "step": 5000
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.5180405378341675,
      "eval_mae": 0.5500118732452393,
      "eval_mse": 0.5180405378341675,
      "eval_rmse": 0.7197503447532654,
      "eval_runtime": 1.2448,
      "eval_samples_per_second": 337.399,
      "eval_steps_per_second": 42.577,
      "step": 5040
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.5257736444473267,
      "eval_mae": 0.561431348323822,
      "eval_mse": 0.5257736444473267,
      "eval_rmse": 0.7251024842262268,
      "eval_runtime": 1.1775,
      "eval_samples_per_second": 356.675,
      "eval_steps_per_second": 45.009,
      "step": 5460
    },
    {
      "epoch": 13.1,
      "learning_rate": 7.380952380952382e-06,
      "loss": 0.0861,
      "step": 5500
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.5195261836051941,
      "eval_mae": 0.5508889555931091,
      "eval_mse": 0.5195261836051941,
      "eval_rmse": 0.7207816243171692,
      "eval_runtime": 1.2061,
      "eval_samples_per_second": 348.216,
      "eval_steps_per_second": 43.942,
      "step": 5880
    },
    {
      "epoch": 14.29,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.0833,
      "step": 6000
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.5224341154098511,
      "eval_mae": 0.552178144454956,
      "eval_mse": 0.5224341154098511,
      "eval_rmse": 0.7227960228919983,
      "eval_runtime": 1.1671,
      "eval_samples_per_second": 359.858,
      "eval_steps_per_second": 45.411,
      "step": 6300
    },
    {
      "epoch": 15.48,
      "learning_rate": 6.9047619047619055e-06,
      "loss": 0.0741,
      "step": 6500
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.5121651291847229,
      "eval_mae": 0.5470804572105408,
      "eval_mse": 0.5121651291847229,
      "eval_rmse": 0.715657114982605,
      "eval_runtime": 1.1174,
      "eval_samples_per_second": 375.865,
      "eval_steps_per_second": 47.431,
      "step": 6720
    },
    {
      "epoch": 16.67,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0744,
      "step": 7000
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.4920870065689087,
      "eval_mae": 0.5328148007392883,
      "eval_mse": 0.4920870065689087,
      "eval_rmse": 0.7014891505241394,
      "eval_runtime": 1.1494,
      "eval_samples_per_second": 365.42,
      "eval_steps_per_second": 46.112,
      "step": 7140
    },
    {
      "epoch": 17.86,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.0649,
      "step": 7500
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.5028687119483948,
      "eval_mae": 0.5357252359390259,
      "eval_mse": 0.5028687119483948,
      "eval_rmse": 0.7091323733329773,
      "eval_runtime": 1.3327,
      "eval_samples_per_second": 315.156,
      "eval_steps_per_second": 39.77,
      "step": 7560
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.526236891746521,
      "eval_mae": 0.5603699684143066,
      "eval_mse": 0.526236891746521,
      "eval_rmse": 0.7254218459129333,
      "eval_runtime": 1.2696,
      "eval_samples_per_second": 330.822,
      "eval_steps_per_second": 41.747,
      "step": 7980
    },
    {
      "epoch": 19.05,
      "learning_rate": 6.1904761904761914e-06,
      "loss": 0.0639,
      "step": 8000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.5124732851982117,
      "eval_mae": 0.5440532565116882,
      "eval_mse": 0.5124732851982117,
      "eval_rmse": 0.7158724069595337,
      "eval_runtime": 1.139,
      "eval_samples_per_second": 368.742,
      "eval_steps_per_second": 46.532,
      "step": 8400
    },
    {
      "epoch": 20.24,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 0.0609,
      "step": 8500
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.4891616106033325,
      "eval_mae": 0.5339100956916809,
      "eval_mse": 0.48916158080101013,
      "eval_rmse": 0.6994009017944336,
      "eval_runtime": 1.0834,
      "eval_samples_per_second": 387.673,
      "eval_steps_per_second": 48.921,
      "step": 8820
    },
    {
      "epoch": 21.43,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.0547,
      "step": 9000
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.4973764717578888,
      "eval_mae": 0.5337387919425964,
      "eval_mse": 0.4973764419555664,
      "eval_rmse": 0.7052491903305054,
      "eval_runtime": 1.1768,
      "eval_samples_per_second": 356.886,
      "eval_steps_per_second": 45.036,
      "step": 9240
    },
    {
      "epoch": 22.62,
      "learning_rate": 5.476190476190477e-06,
      "loss": 0.0528,
      "step": 9500
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.4779137670993805,
      "eval_mae": 0.5240978598594666,
      "eval_mse": 0.47791385650634766,
      "eval_rmse": 0.6913131475448608,
      "eval_runtime": 1.1884,
      "eval_samples_per_second": 353.429,
      "eval_steps_per_second": 44.599,
      "step": 9660
    },
    {
      "epoch": 23.81,
      "learning_rate": 5.2380952380952384e-06,
      "loss": 0.0514,
      "step": 10000
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.483572781085968,
      "eval_mae": 0.5290801525115967,
      "eval_mse": 0.48357275128364563,
      "eval_rmse": 0.695393979549408,
      "eval_runtime": 1.1926,
      "eval_samples_per_second": 352.184,
      "eval_steps_per_second": 44.442,
      "step": 10080
    },
    {
      "epoch": 25.0,
      "learning_rate": 5e-06,
      "loss": 0.0516,
      "step": 10500
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.4979321360588074,
      "eval_mae": 0.5313663482666016,
      "eval_mse": 0.49793222546577454,
      "eval_rmse": 0.7056431174278259,
      "eval_runtime": 1.1676,
      "eval_samples_per_second": 359.71,
      "eval_steps_per_second": 45.392,
      "step": 10500
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.48045653104782104,
      "eval_mae": 0.530644416809082,
      "eval_mse": 0.48045653104782104,
      "eval_rmse": 0.693149745464325,
      "eval_runtime": 1.1924,
      "eval_samples_per_second": 352.222,
      "eval_steps_per_second": 44.447,
      "step": 10920
    },
    {
      "epoch": 26.19,
      "learning_rate": 4.761904761904762e-06,
      "loss": 0.0463,
      "step": 11000
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.48534154891967773,
      "eval_mae": 0.5263835191726685,
      "eval_mse": 0.48534154891967773,
      "eval_rmse": 0.696664571762085,
      "eval_runtime": 1.1385,
      "eval_samples_per_second": 368.9,
      "eval_steps_per_second": 46.552,
      "step": 11340
    },
    {
      "epoch": 27.38,
      "learning_rate": 4.523809523809524e-06,
      "loss": 0.0451,
      "step": 11500
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.4952319264411926,
      "eval_mae": 0.5382465720176697,
      "eval_mse": 0.4952319264411926,
      "eval_rmse": 0.7037271857261658,
      "eval_runtime": 1.1142,
      "eval_samples_per_second": 376.951,
      "eval_steps_per_second": 47.568,
      "step": 11760
    },
    {
      "epoch": 28.57,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.0428,
      "step": 12000
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.48455557227134705,
      "eval_mae": 0.529035210609436,
      "eval_mse": 0.48455557227134705,
      "eval_rmse": 0.6961002349853516,
      "eval_runtime": 1.1847,
      "eval_samples_per_second": 354.512,
      "eval_steps_per_second": 44.736,
      "step": 12180
    },
    {
      "epoch": 29.76,
      "learning_rate": 4.047619047619048e-06,
      "loss": 0.0417,
      "step": 12500
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.4880525767803192,
      "eval_mae": 0.5353544354438782,
      "eval_mse": 0.4880525767803192,
      "eval_rmse": 0.6986076235771179,
      "eval_runtime": 1.2654,
      "eval_samples_per_second": 331.922,
      "eval_steps_per_second": 41.885,
      "step": 12600
    },
    {
      "epoch": 30.95,
      "learning_rate": 3.80952380952381e-06,
      "loss": 0.0404,
      "step": 13000
    },
    {
      "epoch": 31.0,
      "eval_loss": 0.48999953269958496,
      "eval_mae": 0.5326849222183228,
      "eval_mse": 0.4899995028972626,
      "eval_rmse": 0.6999996304512024,
      "eval_runtime": 1.2442,
      "eval_samples_per_second": 337.554,
      "eval_steps_per_second": 42.596,
      "step": 13020
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.5017576813697815,
      "eval_mae": 0.5423673987388611,
      "eval_mse": 0.5017576813697815,
      "eval_rmse": 0.7083485722541809,
      "eval_runtime": 1.1196,
      "eval_samples_per_second": 375.135,
      "eval_steps_per_second": 47.339,
      "step": 13440
    },
    {
      "epoch": 32.14,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.0391,
      "step": 13500
    },
    {
      "epoch": 33.0,
      "eval_loss": 0.48715752363204956,
      "eval_mae": 0.5341249108314514,
      "eval_mse": 0.48715752363204956,
      "eval_rmse": 0.6979666948318481,
      "eval_runtime": 1.1228,
      "eval_samples_per_second": 374.062,
      "eval_steps_per_second": 47.203,
      "step": 13860
    },
    {
      "epoch": 33.33,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0373,
      "step": 14000
    },
    {
      "epoch": 34.0,
      "eval_loss": 0.479116827249527,
      "eval_mae": 0.5321008563041687,
      "eval_mse": 0.479116827249527,
      "eval_rmse": 0.6921826601028442,
      "eval_runtime": 1.206,
      "eval_samples_per_second": 348.254,
      "eval_steps_per_second": 43.946,
      "step": 14280
    },
    {
      "epoch": 34.52,
      "learning_rate": 3.0952380952380957e-06,
      "loss": 0.0359,
      "step": 14500
    },
    {
      "epoch": 35.0,
      "eval_loss": 0.48590707778930664,
      "eval_mae": 0.5346122980117798,
      "eval_mse": 0.48590704798698425,
      "eval_rmse": 0.6970703601837158,
      "eval_runtime": 0.8892,
      "eval_samples_per_second": 472.339,
      "eval_steps_per_second": 59.605,
      "step": 14700
    },
    {
      "epoch": 35.71,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.0334,
      "step": 15000
    },
    {
      "epoch": 36.0,
      "eval_loss": 0.4766678810119629,
      "eval_mae": 0.5263535380363464,
      "eval_mse": 0.4766678810119629,
      "eval_rmse": 0.690411388874054,
      "eval_runtime": 1.286,
      "eval_samples_per_second": 326.603,
      "eval_steps_per_second": 41.214,
      "step": 15120
    },
    {
      "epoch": 36.9,
      "learning_rate": 2.6190476190476192e-06,
      "loss": 0.0341,
      "step": 15500
    },
    {
      "epoch": 37.0,
      "eval_loss": 0.4740683436393738,
      "eval_mae": 0.5217899680137634,
      "eval_mse": 0.4740683138370514,
      "eval_rmse": 0.6885262131690979,
      "eval_runtime": 1.1893,
      "eval_samples_per_second": 353.162,
      "eval_steps_per_second": 44.566,
      "step": 15540
    },
    {
      "epoch": 38.0,
      "eval_loss": 0.478584349155426,
      "eval_mae": 0.527285635471344,
      "eval_mse": 0.478584349155426,
      "eval_rmse": 0.6917979121208191,
      "eval_runtime": 1.1477,
      "eval_samples_per_second": 365.939,
      "eval_steps_per_second": 46.178,
      "step": 15960
    },
    {
      "epoch": 38.1,
      "learning_rate": 2.380952380952381e-06,
      "loss": 0.0325,
      "step": 16000
    },
    {
      "epoch": 39.0,
      "eval_loss": 0.46834418177604675,
      "eval_mae": 0.5196450352668762,
      "eval_mse": 0.46834418177604675,
      "eval_rmse": 0.6843567490577698,
      "eval_runtime": 1.1282,
      "eval_samples_per_second": 372.276,
      "eval_steps_per_second": 46.978,
      "step": 16380
    },
    {
      "epoch": 39.29,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.0331,
      "step": 16500
    },
    {
      "epoch": 40.0,
      "eval_loss": 0.46904683113098145,
      "eval_mae": 0.522687554359436,
      "eval_mse": 0.46904683113098145,
      "eval_rmse": 0.6848699450492859,
      "eval_runtime": 1.1578,
      "eval_samples_per_second": 362.764,
      "eval_steps_per_second": 45.777,
      "step": 16800
    },
    {
      "epoch": 40.48,
      "learning_rate": 1.904761904761905e-06,
      "loss": 0.0315,
      "step": 17000
    },
    {
      "epoch": 41.0,
      "eval_loss": 0.46947211027145386,
      "eval_mae": 0.5197708010673523,
      "eval_mse": 0.46947211027145386,
      "eval_rmse": 0.6851803660392761,
      "eval_runtime": 1.1711,
      "eval_samples_per_second": 358.64,
      "eval_steps_per_second": 45.257,
      "step": 17220
    },
    {
      "epoch": 41.67,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0299,
      "step": 17500
    },
    {
      "epoch": 42.0,
      "eval_loss": 0.46514230966567993,
      "eval_mae": 0.518059253692627,
      "eval_mse": 0.46514227986335754,
      "eval_rmse": 0.6820133924484253,
      "eval_runtime": 1.1482,
      "eval_samples_per_second": 365.797,
      "eval_steps_per_second": 46.16,
      "step": 17640
    },
    {
      "epoch": 42.86,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.0314,
      "step": 18000
    },
    {
      "epoch": 43.0,
      "eval_loss": 0.4640100300312042,
      "eval_mae": 0.51801997423172,
      "eval_mse": 0.46400997042655945,
      "eval_rmse": 0.6811828017234802,
      "eval_runtime": 1.1908,
      "eval_samples_per_second": 352.696,
      "eval_steps_per_second": 44.507,
      "step": 18060
    },
    {
      "epoch": 44.0,
      "eval_loss": 0.4704785943031311,
      "eval_mae": 0.5230998992919922,
      "eval_mse": 0.4704785943031311,
      "eval_rmse": 0.6859143972396851,
      "eval_runtime": 1.2463,
      "eval_samples_per_second": 337.011,
      "eval_steps_per_second": 42.528,
      "step": 18480
    },
    {
      "epoch": 44.05,
      "learning_rate": 1.1904761904761906e-06,
      "loss": 0.0283,
      "step": 18500
    },
    {
      "epoch": 45.0,
      "eval_loss": 0.4678580164909363,
      "eval_mae": 0.521861732006073,
      "eval_mse": 0.46785807609558105,
      "eval_rmse": 0.6840015053749084,
      "eval_runtime": 1.2433,
      "eval_samples_per_second": 337.812,
      "eval_steps_per_second": 42.629,
      "step": 18900
    },
    {
      "epoch": 45.24,
      "learning_rate": 9.523809523809525e-07,
      "loss": 0.0284,
      "step": 19000
    },
    {
      "epoch": 46.0,
      "eval_loss": 0.4686986207962036,
      "eval_mae": 0.5229542851448059,
      "eval_mse": 0.4686986207962036,
      "eval_rmse": 0.6846156716346741,
      "eval_runtime": 1.1892,
      "eval_samples_per_second": 353.193,
      "eval_steps_per_second": 44.57,
      "step": 19320
    },
    {
      "epoch": 46.43,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.0273,
      "step": 19500
    },
    {
      "epoch": 47.0,
      "eval_loss": 0.46382173895835876,
      "eval_mae": 0.5197833776473999,
      "eval_mse": 0.4638217091560364,
      "eval_rmse": 0.6810445785522461,
      "eval_runtime": 1.2872,
      "eval_samples_per_second": 326.279,
      "eval_steps_per_second": 41.173,
      "step": 19740
    },
    {
      "epoch": 47.62,
      "learning_rate": 4.7619047619047623e-07,
      "loss": 0.0283,
      "step": 20000
    },
    {
      "epoch": 48.0,
      "eval_loss": 0.4631454348564148,
      "eval_mae": 0.5184262990951538,
      "eval_mse": 0.4631454348564148,
      "eval_rmse": 0.6805478930473328,
      "eval_runtime": 1.1721,
      "eval_samples_per_second": 358.345,
      "eval_steps_per_second": 45.22,
      "step": 20160
    },
    {
      "epoch": 48.81,
      "learning_rate": 2.3809523809523811e-07,
      "loss": 0.0268,
      "step": 20500
    },
    {
      "epoch": 49.0,
      "eval_loss": 0.4649342894554138,
      "eval_mae": 0.5196540355682373,
      "eval_mse": 0.4649342894554138,
      "eval_rmse": 0.6818609237670898,
      "eval_runtime": 1.1402,
      "eval_samples_per_second": 368.356,
      "eval_steps_per_second": 46.483,
      "step": 20580
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.0,
      "loss": 0.0269,
      "step": 21000
    },
    {
      "epoch": 50.0,
      "eval_loss": 0.46444272994995117,
      "eval_mae": 0.5188572406768799,
      "eval_mse": 0.46444275975227356,
      "eval_rmse": 0.6815003752708435,
      "eval_runtime": 1.0966,
      "eval_samples_per_second": 382.987,
      "eval_steps_per_second": 48.329,
      "step": 21000
    }
  ],
  "max_steps": 21000,
  "num_train_epochs": 50,
  "total_flos": 1.1126765647872e+16,
  "trial_name": null,
  "trial_params": null
}
|
|