{
  "best_metric": 0.9895390868186951,
  "best_model_checkpoint": "/kaggle/output/checkpoint-49000",
  "epoch": 2.077900912646675,
  "eval_steps": 1000,
  "global_step": 51000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777777e-11,
      "loss": 1.2076,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.7750000000000004e-08,
      "loss": 1.1301,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_accuracy": 0.3321357285429142,
      "eval_loss": 1.1047232151031494,
      "eval_runtime": 50.8344,
      "eval_samples_per_second": 98.555,
      "eval_steps_per_second": 12.334,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 5.5527777777777784e-08,
      "loss": 1.1286,
      "step": 2000
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.3377245508982036,
      "eval_loss": 1.1038333177566528,
      "eval_runtime": 50.8114,
      "eval_samples_per_second": 98.6,
      "eval_steps_per_second": 12.34,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.327777777777778e-08,
      "loss": 1.1214,
      "step": 3000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.3409181636726547,
      "eval_loss": 1.103529930114746,
      "eval_runtime": 50.8258,
      "eval_samples_per_second": 98.572,
      "eval_steps_per_second": 12.336,
      "step": 3000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.1105555555555557e-07,
      "loss": 1.1249,
      "step": 4000
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.3469061876247505,
      "eval_loss": 1.1000434160232544,
      "eval_runtime": 50.6414,
      "eval_samples_per_second": 98.931,
      "eval_steps_per_second": 12.381,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.3880555555555558e-07,
      "loss": 1.1206,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.34171656686626745,
      "eval_loss": 1.0996097326278687,
      "eval_runtime": 50.6956,
      "eval_samples_per_second": 98.825,
      "eval_steps_per_second": 12.368,
      "step": 5000
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.6658333333333335e-07,
      "loss": 1.1161,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.3590818363273453,
      "eval_loss": 1.096977710723877,
      "eval_runtime": 50.5826,
      "eval_samples_per_second": 99.046,
      "eval_steps_per_second": 12.396,
      "step": 6000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9433333333333334e-07,
      "loss": 1.1138,
      "step": 7000
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.3532934131736527,
      "eval_loss": 1.0949815511703491,
      "eval_runtime": 50.5604,
      "eval_samples_per_second": 99.089,
      "eval_steps_per_second": 12.401,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.2211111111111114e-07,
      "loss": 1.1135,
      "step": 8000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.36327345309381237,
      "eval_loss": 1.093558430671692,
      "eval_runtime": 50.608,
      "eval_samples_per_second": 98.996,
      "eval_steps_per_second": 12.389,
      "step": 8000
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.4986111111111113e-07,
      "loss": 1.1063,
      "step": 9000
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.35548902195608784,
      "eval_loss": 1.0992107391357422,
      "eval_runtime": 50.5434,
      "eval_samples_per_second": 99.123,
      "eval_steps_per_second": 12.405,
      "step": 9000
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.776388888888889e-07,
      "loss": 1.1083,
      "step": 10000
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.3836327345309381,
      "eval_loss": 1.0920206308364868,
      "eval_runtime": 50.5559,
      "eval_samples_per_second": 99.098,
      "eval_steps_per_second": 12.402,
      "step": 10000
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.053888888888889e-07,
      "loss": 1.1044,
      "step": 11000
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.38762475049900197,
      "eval_loss": 1.0904459953308105,
      "eval_runtime": 50.9725,
      "eval_samples_per_second": 98.288,
      "eval_steps_per_second": 12.301,
      "step": 11000
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.331666666666667e-07,
      "loss": 1.1019,
      "step": 12000
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.3912175648702595,
      "eval_loss": 1.088543176651001,
      "eval_runtime": 50.8297,
      "eval_samples_per_second": 98.564,
      "eval_steps_per_second": 12.335,
      "step": 12000
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.609166666666667e-07,
      "loss": 1.1032,
      "step": 13000
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.3435129740518962,
      "eval_loss": 1.0940682888031006,
      "eval_runtime": 51.0031,
      "eval_samples_per_second": 98.229,
      "eval_steps_per_second": 12.293,
      "step": 13000
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.886944444444445e-07,
      "loss": 1.1006,
      "step": 14000
    },
    {
      "epoch": 0.57,
      "eval_accuracy": 0.36826347305389223,
      "eval_loss": 1.0924910306930542,
      "eval_runtime": 51.2753,
      "eval_samples_per_second": 97.708,
      "eval_steps_per_second": 12.228,
      "step": 14000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.164444444444445e-07,
      "loss": 1.1,
      "step": 15000
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.38323353293413176,
      "eval_loss": 1.0879395008087158,
      "eval_runtime": 51.3783,
      "eval_samples_per_second": 97.512,
      "eval_steps_per_second": 12.204,
      "step": 15000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.442222222222223e-07,
      "loss": 1.0982,
      "step": 16000
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.3964071856287425,
      "eval_loss": 1.0822893381118774,
      "eval_runtime": 51.2346,
      "eval_samples_per_second": 97.786,
      "eval_steps_per_second": 12.238,
      "step": 16000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7197222222222224e-07,
      "loss": 1.0953,
      "step": 17000
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.4121756487025948,
      "eval_loss": 1.0795228481292725,
      "eval_runtime": 51.31,
      "eval_samples_per_second": 97.642,
      "eval_steps_per_second": 12.22,
      "step": 17000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.997500000000001e-07,
      "loss": 1.0943,
      "step": 18000
    },
    {
      "epoch": 0.73,
      "eval_accuracy": 0.42015968063872255,
      "eval_loss": 1.079458236694336,
      "eval_runtime": 51.3255,
      "eval_samples_per_second": 97.612,
      "eval_steps_per_second": 12.216,
      "step": 18000
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.275e-07,
      "loss": 1.0885,
      "step": 19000
    },
    {
      "epoch": 0.77,
      "eval_accuracy": 0.437125748502994,
      "eval_loss": 1.0667964220046997,
      "eval_runtime": 51.1737,
      "eval_samples_per_second": 97.902,
      "eval_steps_per_second": 12.252,
      "step": 19000
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.552777777777778e-07,
      "loss": 1.0792,
      "step": 20000
    },
    {
      "epoch": 0.81,
      "eval_accuracy": 0.4315369261477046,
      "eval_loss": 1.0628777742385864,
      "eval_runtime": 51.4162,
      "eval_samples_per_second": 97.44,
      "eval_steps_per_second": 12.195,
      "step": 20000
    },
    {
      "epoch": 0.86,
      "learning_rate": 5.830277777777779e-07,
      "loss": 1.0755,
      "step": 21000
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.4343313373253493,
      "eval_loss": 1.0604455471038818,
      "eval_runtime": 51.2629,
      "eval_samples_per_second": 97.731,
      "eval_steps_per_second": 12.231,
      "step": 21000
    },
    {
      "epoch": 0.9,
      "learning_rate": 6.108055555555556e-07,
      "loss": 1.073,
      "step": 22000
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.43353293413173655,
      "eval_loss": 1.0552711486816406,
      "eval_runtime": 51.2402,
      "eval_samples_per_second": 97.775,
      "eval_steps_per_second": 12.236,
      "step": 22000
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.385555555555556e-07,
      "loss": 1.0699,
      "step": 23000
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.4313373253493014,
      "eval_loss": 1.052398920059204,
      "eval_runtime": 51.1758,
      "eval_samples_per_second": 97.898,
      "eval_steps_per_second": 12.252,
      "step": 23000
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.663333333333334e-07,
      "loss": 1.0695,
      "step": 24000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.4407185628742515,
      "eval_loss": 1.047132968902588,
      "eval_runtime": 51.3549,
      "eval_samples_per_second": 97.556,
      "eval_steps_per_second": 12.209,
      "step": 24000
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.940833333333334e-07,
      "loss": 1.0656,
      "step": 25000
    },
    {
      "epoch": 1.02,
      "eval_accuracy": 0.4439121756487026,
      "eval_loss": 1.045853853225708,
      "eval_runtime": 51.1548,
      "eval_samples_per_second": 97.938,
      "eval_steps_per_second": 12.257,
      "step": 25000
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.218611111111112e-07,
      "loss": 1.0645,
      "step": 26000
    },
    {
      "epoch": 1.06,
      "eval_accuracy": 0.43273453093812375,
      "eval_loss": 1.0467982292175293,
      "eval_runtime": 51.1774,
      "eval_samples_per_second": 97.895,
      "eval_steps_per_second": 12.252,
      "step": 26000
    },
    {
      "epoch": 1.1,
      "learning_rate": 7.496111111111112e-07,
      "loss": 1.0639,
      "step": 27000
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.43313373253493015,
      "eval_loss": 1.0436949729919434,
      "eval_runtime": 51.1313,
      "eval_samples_per_second": 97.983,
      "eval_steps_per_second": 12.263,
      "step": 27000
    },
    {
      "epoch": 1.14,
      "learning_rate": 7.77388888888889e-07,
      "loss": 1.0638,
      "step": 28000
    },
    {
      "epoch": 1.14,
      "eval_accuracy": 0.436127744510978,
      "eval_loss": 1.0424726009368896,
      "eval_runtime": 51.2359,
      "eval_samples_per_second": 97.783,
      "eval_steps_per_second": 12.238,
      "step": 28000
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.051388888888889e-07,
      "loss": 1.0591,
      "step": 29000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.43852295409181635,
      "eval_loss": 1.0379680395126343,
      "eval_runtime": 51.0917,
      "eval_samples_per_second": 98.059,
      "eval_steps_per_second": 12.272,
      "step": 29000
    },
    {
      "epoch": 1.22,
      "learning_rate": 8.329166666666667e-07,
      "loss": 1.0571,
      "step": 30000
    },
    {
      "epoch": 1.22,
      "eval_accuracy": 0.43932135728542915,
      "eval_loss": 1.034009575843811,
      "eval_runtime": 51.187,
      "eval_samples_per_second": 97.876,
      "eval_steps_per_second": 12.249,
      "step": 30000
    },
    {
      "epoch": 1.26,
      "learning_rate": 8.606666666666668e-07,
      "loss": 1.0609,
      "step": 31000
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.4489021956087824,
      "eval_loss": 1.0380228757858276,
      "eval_runtime": 51.2533,
      "eval_samples_per_second": 97.75,
      "eval_steps_per_second": 12.233,
      "step": 31000
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.884444444444445e-07,
      "loss": 1.0536,
      "step": 32000
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.4347305389221557,
      "eval_loss": 1.035749912261963,
      "eval_runtime": 51.1708,
      "eval_samples_per_second": 97.907,
      "eval_steps_per_second": 12.253,
      "step": 32000
    },
    {
      "epoch": 1.34,
      "learning_rate": 9.161944444444445e-07,
      "loss": 1.0526,
      "step": 33000
    },
    {
      "epoch": 1.34,
      "eval_accuracy": 0.4471057884231537,
      "eval_loss": 1.0298662185668945,
      "eval_runtime": 51.2742,
      "eval_samples_per_second": 97.71,
      "eval_steps_per_second": 12.228,
      "step": 33000
    },
    {
      "epoch": 1.39,
      "learning_rate": 9.439722222222223e-07,
      "loss": 1.0454,
      "step": 34000
    },
    {
      "epoch": 1.39,
      "eval_accuracy": 0.449500998003992,
      "eval_loss": 1.028323769569397,
      "eval_runtime": 51.2636,
      "eval_samples_per_second": 97.73,
      "eval_steps_per_second": 12.231,
      "step": 34000
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.717222222222224e-07,
      "loss": 1.0507,
      "step": 35000
    },
    {
      "epoch": 1.43,
      "eval_accuracy": 0.4554890219560878,
      "eval_loss": 1.0231916904449463,
      "eval_runtime": 51.2328,
      "eval_samples_per_second": 97.789,
      "eval_steps_per_second": 12.238,
      "step": 35000
    },
    {
      "epoch": 1.47,
      "learning_rate": 9.995000000000001e-07,
      "loss": 1.0462,
      "step": 36000
    },
    {
      "epoch": 1.47,
      "eval_accuracy": 0.4590818363273453,
      "eval_loss": 1.02061927318573,
      "eval_runtime": 50.6455,
      "eval_samples_per_second": 98.923,
      "eval_steps_per_second": 12.38,
      "step": 36000
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.02725e-06,
      "loss": 1.0423,
      "step": 37000
    },
    {
      "epoch": 1.51,
      "eval_accuracy": 0.4722554890219561,
      "eval_loss": 1.0115904808044434,
      "eval_runtime": 51.0173,
      "eval_samples_per_second": 98.202,
      "eval_steps_per_second": 12.29,
      "step": 37000
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.055027777777778e-06,
      "loss": 1.041,
      "step": 38000
    },
    {
      "epoch": 1.55,
      "eval_accuracy": 0.47365269461077847,
      "eval_loss": 1.0123876333236694,
      "eval_runtime": 51.1777,
      "eval_samples_per_second": 97.894,
      "eval_steps_per_second": 12.251,
      "step": 38000
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.0827777777777778e-06,
      "loss": 1.0422,
      "step": 39000
    },
    {
      "epoch": 1.59,
      "eval_accuracy": 0.46367265469061875,
      "eval_loss": 1.019059181213379,
      "eval_runtime": 51.1786,
      "eval_samples_per_second": 97.892,
      "eval_steps_per_second": 12.251,
      "step": 39000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.1105555555555556e-06,
      "loss": 1.034,
      "step": 40000
    },
    {
      "epoch": 1.63,
      "eval_accuracy": 0.4782435129740519,
      "eval_loss": 1.0045363903045654,
      "eval_runtime": 51.2481,
      "eval_samples_per_second": 97.76,
      "eval_steps_per_second": 12.235,
      "step": 40000
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.1383055555555557e-06,
      "loss": 1.0349,
      "step": 41000
    },
    {
      "epoch": 1.67,
      "eval_accuracy": 0.4550898203592814,
      "eval_loss": 1.0253337621688843,
      "eval_runtime": 51.219,
      "eval_samples_per_second": 97.815,
      "eval_steps_per_second": 12.242,
      "step": 41000
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.1660833333333334e-06,
      "loss": 1.0338,
      "step": 42000
    },
    {
      "epoch": 1.71,
      "eval_accuracy": 0.480439121756487,
      "eval_loss": 1.000636339187622,
      "eval_runtime": 51.1853,
      "eval_samples_per_second": 97.88,
      "eval_steps_per_second": 12.25,
      "step": 42000
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.1938333333333335e-06,
      "loss": 1.031,
      "step": 43000
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.45888223552894214,
      "eval_loss": 1.0288445949554443,
      "eval_runtime": 51.295,
      "eval_samples_per_second": 97.67,
      "eval_steps_per_second": 12.223,
      "step": 43000
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.2216111111111113e-06,
      "loss": 1.0327,
      "step": 44000
    },
    {
      "epoch": 1.79,
      "eval_accuracy": 0.493812375249501,
      "eval_loss": 0.9989358186721802,
      "eval_runtime": 51.1981,
      "eval_samples_per_second": 97.855,
      "eval_steps_per_second": 12.247,
      "step": 44000
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.2493611111111112e-06,
      "loss": 1.0321,
      "step": 45000
    },
    {
      "epoch": 1.83,
      "eval_accuracy": 0.4870259481037924,
      "eval_loss": 1.003555417060852,
      "eval_runtime": 50.8148,
      "eval_samples_per_second": 98.593,
      "eval_steps_per_second": 12.339,
      "step": 45000
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.277138888888889e-06,
      "loss": 1.0326,
      "step": 46000
    },
    {
      "epoch": 1.87,
      "eval_accuracy": 0.4810379241516966,
      "eval_loss": 1.00687575340271,
      "eval_runtime": 50.7043,
      "eval_samples_per_second": 98.808,
      "eval_steps_per_second": 12.366,
      "step": 46000
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.304888888888889e-06,
      "loss": 1.0266,
      "step": 47000
    },
    {
      "epoch": 1.91,
      "eval_accuracy": 0.49840319361277446,
      "eval_loss": 0.9924121499061584,
      "eval_runtime": 50.7174,
      "eval_samples_per_second": 98.783,
      "eval_steps_per_second": 12.363,
      "step": 47000
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.3326666666666668e-06,
      "loss": 1.0285,
      "step": 48000
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.45788423153692615,
      "eval_loss": 1.0277597904205322,
      "eval_runtime": 50.7995,
      "eval_samples_per_second": 98.623,
      "eval_steps_per_second": 12.343,
      "step": 48000
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.3604166666666668e-06,
      "loss": 1.0236,
      "step": 49000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5021956087824351,
      "eval_loss": 0.9895390868186951,
      "eval_runtime": 50.824,
      "eval_samples_per_second": 98.576,
      "eval_steps_per_second": 12.337,
      "step": 49000
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.3881944444444446e-06,
      "loss": 1.0243,
      "step": 50000
    },
    {
      "epoch": 2.04,
      "eval_accuracy": 0.47984031936127747,
      "eval_loss": 1.0068507194519043,
      "eval_runtime": 50.7665,
      "eval_samples_per_second": 98.687,
      "eval_steps_per_second": 12.351,
      "step": 50000
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.4159444444444445e-06,
      "loss": 1.0237,
      "step": 51000
    },
    {
      "epoch": 2.08,
      "eval_accuracy": 0.5053892215568863,
      "eval_loss": 0.9906230568885803,
      "eval_runtime": 50.9162,
      "eval_samples_per_second": 98.397,
      "eval_steps_per_second": 12.314,
      "step": 51000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 1.066075890599854e+17,
  "trial_name": null,
  "trial_params": null
}
|