{
  "best_metric": 0.9777482151985168,
  "best_model_checkpoint": "/kaggle/output/checkpoint-131000",
  "epoch": 5.704041720990873,
  "eval_steps": 1000,
  "global_step": 140000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 2.7777777777777777e-11, "loss": 1.1078, "step": 1 },
    { "epoch": 0.04, "learning_rate": 2.7750000000000004e-08, "loss": 1.128, "step": 1000 },
    { "epoch": 0.04, "eval_accuracy": 0.330938123752495, "eval_loss": 1.1083118915557861, "eval_runtime": 12.3979, "eval_samples_per_second": 404.1, "eval_steps_per_second": 50.573, "step": 1000 },
    { "epoch": 0.08, "learning_rate": 5.5527777777777784e-08, "loss": 1.1164, "step": 2000 },
    { "epoch": 0.08, "eval_accuracy": 0.3323353293413174, "eval_loss": 1.102672815322876, "eval_runtime": 12.2861, "eval_samples_per_second": 407.778, "eval_steps_per_second": 51.033, "step": 2000 },
    { "epoch": 0.12, "learning_rate": 8.327777777777778e-08, "loss": 1.1158, "step": 3000 },
    { "epoch": 0.12, "eval_accuracy": 0.34211576846307384, "eval_loss": 1.1024774312973022, "eval_runtime": 12.3746, "eval_samples_per_second": 404.863, "eval_steps_per_second": 50.668, "step": 3000 },
    { "epoch": 0.16, "learning_rate": 1.1105555555555557e-07, "loss": 1.1115, "step": 4000 },
    { "epoch": 0.16, "eval_accuracy": 0.34890219560878244, "eval_loss": 1.1012382507324219, "eval_runtime": 12.5405, "eval_samples_per_second": 399.505, "eval_steps_per_second": 49.998, "step": 4000 },
    { "epoch": 0.2, "learning_rate": 1.3880555555555558e-07, "loss": 1.1141, "step": 5000 },
    { "epoch": 0.2, "eval_accuracy": 0.3534930139720559, "eval_loss": 1.1002192497253418, "eval_runtime": 12.3001, "eval_samples_per_second": 407.314, "eval_steps_per_second": 50.975, "step": 5000 },
    { "epoch": 0.24, "learning_rate": 1.6658333333333335e-07, "loss": 1.112, "step": 6000 },
    { "epoch": 0.24, "eval_accuracy": 0.3524950099800399, "eval_loss": 1.100056767463684, "eval_runtime": 12.4062, "eval_samples_per_second": 403.831, "eval_steps_per_second": 50.539, "step": 6000 },
    { "epoch": 0.29, "learning_rate": 1.9433333333333334e-07, "loss": 1.1115, "step": 7000 },
    { "epoch": 0.29, "eval_accuracy": 0.34331337325349304, "eval_loss": 1.098986029624939, "eval_runtime": 12.4456, "eval_samples_per_second": 402.551, "eval_steps_per_second": 50.379, "step": 7000 },
    { "epoch": 0.33, "learning_rate": 2.2211111111111114e-07, "loss": 1.109, "step": 8000 },
    { "epoch": 0.33, "eval_accuracy": 0.3347305389221557, "eval_loss": 1.098678469657898, "eval_runtime": 12.5978, "eval_samples_per_second": 397.688, "eval_steps_per_second": 49.771, "step": 8000 },
    { "epoch": 0.37, "learning_rate": 2.4986111111111113e-07, "loss": 1.1117, "step": 9000 },
    { "epoch": 0.37, "eval_accuracy": 0.34311377245508984, "eval_loss": 1.0984419584274292, "eval_runtime": 12.313, "eval_samples_per_second": 406.886, "eval_steps_per_second": 50.922, "step": 9000 },
    { "epoch": 0.41, "learning_rate": 2.776388888888889e-07, "loss": 1.1081, "step": 10000 },
    { "epoch": 0.41, "eval_accuracy": 0.35429141716566864, "eval_loss": 1.0968743562698364, "eval_runtime": 12.4088, "eval_samples_per_second": 403.744, "eval_steps_per_second": 50.528, "step": 10000 },
    { "epoch": 0.45, "learning_rate": 3.053888888888889e-07, "loss": 1.107, "step": 11000 },
    { "epoch": 0.45, "eval_accuracy": 0.3722554890219561, "eval_loss": 1.0950665473937988, "eval_runtime": 12.5775, "eval_samples_per_second": 398.33, "eval_steps_per_second": 49.851, "step": 11000 },
    { "epoch": 0.49, "learning_rate": 3.331666666666667e-07, "loss": 1.1057, "step": 12000 },
    { "epoch": 0.49, "eval_accuracy": 0.37544910179640717, "eval_loss": 1.0940738916397095, "eval_runtime": 12.5186, "eval_samples_per_second": 400.203, "eval_steps_per_second": 50.085, "step": 12000 },
    { "epoch": 0.53, "learning_rate": 3.609166666666667e-07, "loss": 1.1069, "step": 13000 },
    { "epoch": 0.53, "eval_accuracy": 0.3401197604790419, "eval_loss": 1.0937162637710571, "eval_runtime": 12.2594, "eval_samples_per_second": 408.666, "eval_steps_per_second": 51.144, "step": 13000 },
    { "epoch": 0.57, "learning_rate": 3.886944444444445e-07, "loss": 1.1046, "step": 14000 },
    { "epoch": 0.57, "eval_accuracy": 0.37604790419161677, "eval_loss": 1.0926916599273682, "eval_runtime": 12.256, "eval_samples_per_second": 408.78, "eval_steps_per_second": 51.159, "step": 14000 },
    { "epoch": 0.61, "learning_rate": 4.164444444444445e-07, "loss": 1.1046, "step": 15000 },
    { "epoch": 0.61, "eval_accuracy": 0.39201596806387223, "eval_loss": 1.0912976264953613, "eval_runtime": 12.5019, "eval_samples_per_second": 400.74, "eval_steps_per_second": 50.152, "step": 15000 },
    { "epoch": 0.65, "learning_rate": 4.442222222222223e-07, "loss": 1.1052, "step": 16000 },
    { "epoch": 0.65, "eval_accuracy": 0.393812375249501, "eval_loss": 1.0901482105255127, "eval_runtime": 12.3407, "eval_samples_per_second": 405.973, "eval_steps_per_second": 50.807, "step": 16000 },
    { "epoch": 0.69, "learning_rate": 4.7197222222222224e-07, "loss": 1.1011, "step": 17000 },
    { "epoch": 0.69, "eval_accuracy": 0.3932135728542914, "eval_loss": 1.0901678800582886, "eval_runtime": 12.3644, "eval_samples_per_second": 405.197, "eval_steps_per_second": 50.71, "step": 17000 },
    { "epoch": 0.73, "learning_rate": 4.997500000000001e-07, "loss": 1.1, "step": 18000 },
    { "epoch": 0.73, "eval_accuracy": 0.38143712574850297, "eval_loss": 1.0897998809814453, "eval_runtime": 12.3516, "eval_samples_per_second": 405.615, "eval_steps_per_second": 50.763, "step": 18000 },
    { "epoch": 0.77, "learning_rate": 5.275277777777778e-07, "loss": 1.1007, "step": 19000 },
    { "epoch": 0.77, "eval_accuracy": 0.40678642714570856, "eval_loss": 1.0876121520996094, "eval_runtime": 12.6608, "eval_samples_per_second": 395.708, "eval_steps_per_second": 49.523, "step": 19000 },
    { "epoch": 0.81, "learning_rate": 5.552777777777778e-07, "loss": 1.1013, "step": 20000 },
    { "epoch": 0.81, "eval_accuracy": 0.4053892215568862, "eval_loss": 1.0876350402832031, "eval_runtime": 12.3243, "eval_samples_per_second": 406.513, "eval_steps_per_second": 50.875, "step": 20000 },
    { "epoch": 0.86, "learning_rate": 5.830555555555556e-07, "loss": 1.0987, "step": 21000 },
    { "epoch": 0.86, "eval_accuracy": 0.40858283433133735, "eval_loss": 1.085940957069397, "eval_runtime": 12.3217, "eval_samples_per_second": 406.598, "eval_steps_per_second": 50.886, "step": 21000 },
    { "epoch": 0.9, "learning_rate": 6.108055555555556e-07, "loss": 1.0986, "step": 22000 },
    { "epoch": 0.9, "eval_accuracy": 0.3972055888223553, "eval_loss": 1.085978388786316, "eval_runtime": 12.4285, "eval_samples_per_second": 403.107, "eval_steps_per_second": 50.449, "step": 22000 },
    { "epoch": 0.94, "learning_rate": 6.385833333333334e-07, "loss": 1.0944, "step": 23000 },
    { "epoch": 0.94, "eval_accuracy": 0.41097804391217563, "eval_loss": 1.0837496519088745, "eval_runtime": 12.7175, "eval_samples_per_second": 393.946, "eval_steps_per_second": 49.302, "step": 23000 },
    { "epoch": 0.98, "learning_rate": 6.663611111111112e-07, "loss": 1.095, "step": 24000 },
    { "epoch": 0.98, "eval_accuracy": 0.4121756487025948, "eval_loss": 1.0820704698562622, "eval_runtime": 12.3323, "eval_samples_per_second": 406.251, "eval_steps_per_second": 50.842, "step": 24000 },
    { "epoch": 1.02, "learning_rate": 6.941111111111112e-07, "loss": 1.0915, "step": 25000 },
    { "epoch": 1.02, "eval_accuracy": 0.4177644710578842, "eval_loss": 1.0762931108474731, "eval_runtime": 12.3887, "eval_samples_per_second": 404.399, "eval_steps_per_second": 50.61, "step": 25000 },
    { "epoch": 1.06, "learning_rate": 7.218888888888889e-07, "loss": 1.0889, "step": 26000 },
    { "epoch": 1.06, "eval_accuracy": 0.43253493013972055, "eval_loss": 1.0695326328277588, "eval_runtime": 12.5611, "eval_samples_per_second": 398.85, "eval_steps_per_second": 49.916, "step": 26000 },
    { "epoch": 1.1, "learning_rate": 7.49638888888889e-07, "loss": 1.0827, "step": 27000 },
    { "epoch": 1.1, "eval_accuracy": 0.4343313373253493, "eval_loss": 1.0662548542022705, "eval_runtime": 12.641, "eval_samples_per_second": 396.328, "eval_steps_per_second": 49.6, "step": 27000 },
    { "epoch": 1.14, "learning_rate": 7.774166666666668e-07, "loss": 1.0765, "step": 28000 },
    { "epoch": 1.14, "eval_accuracy": 0.4315369261477046, "eval_loss": 1.0609794855117798, "eval_runtime": 12.4637, "eval_samples_per_second": 401.967, "eval_steps_per_second": 50.306, "step": 28000 },
    { "epoch": 1.18, "learning_rate": 8.051666666666667e-07, "loss": 1.0736, "step": 29000 },
    { "epoch": 1.18, "eval_accuracy": 0.4377245508982036, "eval_loss": 1.0587859153747559, "eval_runtime": 12.3801, "eval_samples_per_second": 404.683, "eval_steps_per_second": 50.646, "step": 29000 },
    { "epoch": 1.22, "learning_rate": 8.329444444444445e-07, "loss": 1.0726, "step": 30000 },
    { "epoch": 1.22, "eval_accuracy": 0.4249500998003992, "eval_loss": 1.0559338331222534, "eval_runtime": 12.5945, "eval_samples_per_second": 397.792, "eval_steps_per_second": 49.784, "step": 30000 },
    { "epoch": 1.26, "learning_rate": 8.606944444444445e-07, "loss": 1.0751, "step": 31000 },
    { "epoch": 1.26, "eval_accuracy": 0.4427145708582834, "eval_loss": 1.0576916933059692, "eval_runtime": 12.6324, "eval_samples_per_second": 396.599, "eval_steps_per_second": 49.634, "step": 31000 },
    { "epoch": 1.3, "learning_rate": 8.884722222222224e-07, "loss": 1.0708, "step": 32000 },
    { "epoch": 1.3, "eval_accuracy": 0.42634730538922155, "eval_loss": 1.0571542978286743, "eval_runtime": 12.4696, "eval_samples_per_second": 401.776, "eval_steps_per_second": 50.282, "step": 32000 },
    { "epoch": 1.34, "learning_rate": 9.162222222222223e-07, "loss": 1.0711, "step": 33000 },
    { "epoch": 1.34, "eval_accuracy": 0.4419161676646707, "eval_loss": 1.0544849634170532, "eval_runtime": 12.415, "eval_samples_per_second": 403.544, "eval_steps_per_second": 50.503, "step": 33000 },
    { "epoch": 1.39, "learning_rate": 9.440000000000001e-07, "loss": 1.0666, "step": 34000 },
    { "epoch": 1.39, "eval_accuracy": 0.43333333333333335, "eval_loss": 1.05018150806427, "eval_runtime": 12.6226, "eval_samples_per_second": 396.906, "eval_steps_per_second": 49.673, "step": 34000 },
    { "epoch": 1.43, "learning_rate": 9.7175e-07, "loss": 1.0698, "step": 35000 },
    { "epoch": 1.43, "eval_accuracy": 0.4377245508982036, "eval_loss": 1.0488406419754028, "eval_runtime": 12.6246, "eval_samples_per_second": 396.844, "eval_steps_per_second": 49.665, "step": 35000 },
    { "epoch": 1.47, "learning_rate": 9.995277777777778e-07, "loss": 1.067, "step": 36000 },
    { "epoch": 1.47, "eval_accuracy": 0.4407185628742515, "eval_loss": 1.047359824180603, "eval_runtime": 12.4039, "eval_samples_per_second": 403.906, "eval_steps_per_second": 50.549, "step": 36000 },
    { "epoch": 1.51, "learning_rate": 1.027277777777778e-06, "loss": 1.0636, "step": 37000 },
    { "epoch": 1.51, "eval_accuracy": 0.44451097804391215, "eval_loss": 1.0455204248428345, "eval_runtime": 12.3836, "eval_samples_per_second": 404.569, "eval_steps_per_second": 50.632, "step": 37000 },
    { "epoch": 1.55, "learning_rate": 1.0550555555555557e-06, "loss": 1.0658, "step": 38000 },
    { "epoch": 1.55, "eval_accuracy": 0.4407185628742515, "eval_loss": 1.0461794137954712, "eval_runtime": 12.5546, "eval_samples_per_second": 399.057, "eval_steps_per_second": 49.942, "step": 38000 },
    { "epoch": 1.59, "learning_rate": 1.0828333333333334e-06, "loss": 1.0635, "step": 39000 },
    { "epoch": 1.59, "eval_accuracy": 0.4367265469061876, "eval_loss": 1.0461270809173584, "eval_runtime": 12.5509, "eval_samples_per_second": 399.173, "eval_steps_per_second": 49.956, "step": 39000 },
    { "epoch": 1.63, "learning_rate": 1.1105833333333335e-06, "loss": 1.0596, "step": 40000 },
    { "epoch": 1.63, "eval_accuracy": 0.449500998003992, "eval_loss": 1.0418727397918701, "eval_runtime": 12.4044, "eval_samples_per_second": 403.889, "eval_steps_per_second": 50.547, "step": 40000 },
    { "epoch": 1.67, "learning_rate": 1.1383611111111113e-06, "loss": 1.0595, "step": 41000 },
    { "epoch": 1.67, "eval_accuracy": 0.4409181636726547, "eval_loss": 1.0417557954788208, "eval_runtime": 12.3642, "eval_samples_per_second": 405.202, "eval_steps_per_second": 50.711, "step": 41000 },
    { "epoch": 1.71, "learning_rate": 1.1661111111111111e-06, "loss": 1.054, "step": 42000 },
    { "epoch": 1.71, "eval_accuracy": 0.4357285429141717, "eval_loss": 1.0383535623550415, "eval_runtime": 12.3634, "eval_samples_per_second": 405.23, "eval_steps_per_second": 50.714, "step": 42000 },
    { "epoch": 1.75, "learning_rate": 1.193888888888889e-06, "loss": 1.0543, "step": 43000 },
    { "epoch": 1.75, "eval_accuracy": 0.43812375249500995, "eval_loss": 1.0463169813156128, "eval_runtime": 12.6398, "eval_samples_per_second": 396.366, "eval_steps_per_second": 49.605, "step": 43000 },
    { "epoch": 1.79, "learning_rate": 1.221638888888889e-06, "loss": 1.0579, "step": 44000 },
    { "epoch": 1.79, "eval_accuracy": 0.4415169660678643, "eval_loss": 1.0388957262039185, "eval_runtime": 12.4542, "eval_samples_per_second": 402.274, "eval_steps_per_second": 50.344, "step": 44000 },
    { "epoch": 1.83, "learning_rate": 1.2494166666666668e-06, "loss": 1.0561, "step": 45000 },
    { "epoch": 1.83, "eval_accuracy": 0.46187624750499, "eval_loss": 1.031552791595459, "eval_runtime": 12.4082, "eval_samples_per_second": 403.764, "eval_steps_per_second": 50.531, "step": 45000 },
    { "epoch": 1.87, "learning_rate": 1.2771666666666668e-06, "loss": 1.0516, "step": 46000 },
    { "epoch": 1.87, "eval_accuracy": 0.4538922155688623, "eval_loss": 1.0312999486923218, "eval_runtime": 12.3695, "eval_samples_per_second": 405.029, "eval_steps_per_second": 50.689, "step": 46000 },
    { "epoch": 1.91, "learning_rate": 1.3049444444444446e-06, "loss": 1.0471, "step": 47000 },
    { "epoch": 1.91, "eval_accuracy": 0.46387225548902195, "eval_loss": 1.027535319328308, "eval_runtime": 12.5846, "eval_samples_per_second": 398.107, "eval_steps_per_second": 49.823, "step": 47000 },
    { "epoch": 1.96, "learning_rate": 1.3326944444444447e-06, "loss": 1.054, "step": 48000 },
    { "epoch": 1.96, "eval_accuracy": 0.4568862275449102, "eval_loss": 1.0308438539505005, "eval_runtime": 12.5646, "eval_samples_per_second": 398.74, "eval_steps_per_second": 49.902, "step": 48000 },
    { "epoch": 2.0, "learning_rate": 1.3604722222222224e-06, "loss": 1.0445, "step": 49000 },
    { "epoch": 2.0, "eval_accuracy": 0.47045908183632734, "eval_loss": 1.0230755805969238, "eval_runtime": 12.4305, "eval_samples_per_second": 403.041, "eval_steps_per_second": 50.44, "step": 49000 },
    { "epoch": 2.04, "learning_rate": 1.3882222222222223e-06, "loss": 1.0479, "step": 50000 },
    { "epoch": 2.04, "eval_accuracy": 0.47305389221556887, "eval_loss": 1.0201445817947388, "eval_runtime": 12.3672, "eval_samples_per_second": 405.103, "eval_steps_per_second": 50.698, "step": 50000 },
    { "epoch": 2.08, "learning_rate": 1.416e-06, "loss": 1.0482, "step": 51000 },
    { "epoch": 2.08, "eval_accuracy": 0.4678642714570858, "eval_loss": 1.0167714357376099, "eval_runtime": 12.5982, "eval_samples_per_second": 397.677, "eval_steps_per_second": 49.769, "step": 51000 },
    { "epoch": 2.12, "learning_rate": 1.4437500000000002e-06, "loss": 1.0406, "step": 52000 },
    { "epoch": 2.12, "eval_accuracy": 0.4722554890219561, "eval_loss": 1.0175116062164307, "eval_runtime": 12.6041, "eval_samples_per_second": 397.49, "eval_steps_per_second": 49.746, "step": 52000 },
    { "epoch": 2.16, "learning_rate": 1.471527777777778e-06, "loss": 1.0457, "step": 53000 },
    { "epoch": 2.16, "eval_accuracy": 0.4748502994011976, "eval_loss": 1.010857343673706, "eval_runtime": 12.3728, "eval_samples_per_second": 404.921, "eval_steps_per_second": 50.676, "step": 53000 },
    { "epoch": 2.2, "learning_rate": 1.4993055555555557e-06, "loss": 1.0399, "step": 54000 },
    { "epoch": 2.2, "eval_accuracy": 0.48522954091816367, "eval_loss": 1.0106219053268433, "eval_runtime": 12.3952, "eval_samples_per_second": 404.188, "eval_steps_per_second": 50.584, "step": 54000 },
    { "epoch": 2.24, "learning_rate": 1.5270555555555558e-06, "loss": 1.0412, "step": 55000 },
    { "epoch": 2.24, "eval_accuracy": 0.4750499001996008, "eval_loss": 1.0111658573150635, "eval_runtime": 12.4858, "eval_samples_per_second": 401.257, "eval_steps_per_second": 50.217, "step": 55000 },
    { "epoch": 2.28, "learning_rate": 1.5548333333333335e-06, "loss": 1.0347, "step": 56000 },
    { "epoch": 2.28, "eval_accuracy": 0.47924151696606787, "eval_loss": 1.0121456384658813, "eval_runtime": 12.6206, "eval_samples_per_second": 396.97, "eval_steps_per_second": 49.681, "step": 56000 },
    { "epoch": 2.32, "learning_rate": 1.5825833333333334e-06, "loss": 1.0324, "step": 57000 },
    { "epoch": 2.32, "eval_accuracy": 0.4738522954091816, "eval_loss": 1.0100611448287964, "eval_runtime": 12.3748, "eval_samples_per_second": 404.857, "eval_steps_per_second": 50.668, "step": 57000 },
    { "epoch": 2.36, "learning_rate": 1.6103611111111112e-06, "loss": 1.0346, "step": 58000 },
    { "epoch": 2.36, "eval_accuracy": 0.4694610778443114, "eval_loss": 1.0124626159667969, "eval_runtime": 12.4716, "eval_samples_per_second": 401.711, "eval_steps_per_second": 50.274, "step": 58000 },
    { "epoch": 2.4, "learning_rate": 1.638111111111111e-06, "loss": 1.0309, "step": 59000 },
    { "epoch": 2.4, "eval_accuracy": 0.48982035928143713, "eval_loss": 1.0022122859954834, "eval_runtime": 12.4734, "eval_samples_per_second": 401.655, "eval_steps_per_second": 50.267, "step": 59000 },
    { "epoch": 2.44, "learning_rate": 1.665888888888889e-06, "loss": 1.0351, "step": 60000 },
    { "epoch": 2.44, "eval_accuracy": 0.4802395209580838, "eval_loss": 1.004167914390564, "eval_runtime": 12.6287, "eval_samples_per_second": 396.714, "eval_steps_per_second": 49.649, "step": 60000 },
    { "epoch": 2.49, "learning_rate": 1.693638888888889e-06, "loss": 1.0312, "step": 61000 },
    { "epoch": 2.49, "eval_accuracy": 0.48323353293413174, "eval_loss": 1.0065865516662598, "eval_runtime": 12.4143, "eval_samples_per_second": 403.567, "eval_steps_per_second": 50.506, "step": 61000 },
    { "epoch": 2.53, "learning_rate": 1.7214166666666666e-06, "loss": 1.0329, "step": 62000 },
    { "epoch": 2.53, "eval_accuracy": 0.47924151696606787, "eval_loss": 1.0041630268096924, "eval_runtime": 12.4326, "eval_samples_per_second": 402.974, "eval_steps_per_second": 50.432, "step": 62000 },
    { "epoch": 2.57, "learning_rate": 1.7491666666666667e-06, "loss": 1.0355, "step": 63000 },
    { "epoch": 2.57, "eval_accuracy": 0.49261477045908186, "eval_loss": 1.0007619857788086, "eval_runtime": 12.4051, "eval_samples_per_second": 403.866, "eval_steps_per_second": 50.544, "step": 63000 },
    { "epoch": 2.61, "learning_rate": 1.7769444444444447e-06, "loss": 1.0297, "step": 64000 },
    { "epoch": 2.61, "eval_accuracy": 0.4942115768463074, "eval_loss": 0.9986936450004578, "eval_runtime": 12.5727, "eval_samples_per_second": 398.482, "eval_steps_per_second": 49.87, "step": 64000 },
    { "epoch": 2.65, "learning_rate": 1.8046944444444446e-06, "loss": 1.0277, "step": 65000 },
    { "epoch": 2.65, "eval_accuracy": 0.49121756487025947, "eval_loss": 1.0008972883224487, "eval_runtime": 12.4544, "eval_samples_per_second": 402.267, "eval_steps_per_second": 50.344, "step": 65000 },
    { "epoch": 2.69, "learning_rate": 1.8324722222222223e-06, "loss": 1.0341, "step": 66000 },
    { "epoch": 2.69, "eval_accuracy": 0.499001996007984, "eval_loss": 0.9953575134277344, "eval_runtime": 12.4973, "eval_samples_per_second": 400.886, "eval_steps_per_second": 50.171, "step": 66000 },
    { "epoch": 2.73, "learning_rate": 1.8602222222222222e-06, "loss": 1.0279, "step": 67000 },
    { "epoch": 2.73, "eval_accuracy": 0.493812375249501, "eval_loss": 0.998134195804596, "eval_runtime": 12.6589, "eval_samples_per_second": 395.769, "eval_steps_per_second": 49.53, "step": 67000 },
    { "epoch": 2.77, "learning_rate": 1.8880000000000002e-06, "loss": 1.03, "step": 68000 },
    { "epoch": 2.77, "eval_accuracy": 0.5051896207584831, "eval_loss": 0.9947481751441956, "eval_runtime": 12.7939, "eval_samples_per_second": 391.594, "eval_steps_per_second": 49.008, "step": 68000 },
    { "epoch": 2.81, "learning_rate": 1.9157500000000003e-06, "loss": 1.0283, "step": 69000 },
    { "epoch": 2.81, "eval_accuracy": 0.5001996007984032, "eval_loss": 0.997833251953125, "eval_runtime": 12.8129, "eval_samples_per_second": 391.012, "eval_steps_per_second": 48.935, "step": 69000 },
    { "epoch": 2.85, "learning_rate": 1.943527777777778e-06, "loss": 1.0288, "step": 70000 },
    { "epoch": 2.85, "eval_accuracy": 0.49181636726546907, "eval_loss": 1.00169038772583, "eval_runtime": 12.5936, "eval_samples_per_second": 397.822, "eval_steps_per_second": 49.787, "step": 70000 },
    { "epoch": 2.89, "learning_rate": 1.9712777777777777e-06, "loss": 1.0238, "step": 71000 },
    { "epoch": 2.89, "eval_accuracy": 0.4944111776447106, "eval_loss": 1.001316785812378, "eval_runtime": 12.485, "eval_samples_per_second": 401.282, "eval_steps_per_second": 50.22, "step": 71000 },
    { "epoch": 2.93, "learning_rate": 1.9990555555555557e-06, "loss": 1.0268, "step": 72000 },
    { "epoch": 2.93, "eval_accuracy": 0.49241516966067866, "eval_loss": 1.0005468130111694, "eval_runtime": 12.501, "eval_samples_per_second": 400.768, "eval_steps_per_second": 50.156, "step": 72000 },
    { "epoch": 2.97, "learning_rate": 2.026805555555556e-06, "loss": 1.0266, "step": 73000 },
    { "epoch": 2.97, "eval_accuracy": 0.4940119760479042, "eval_loss": 0.9998964071273804, "eval_runtime": 12.6227, "eval_samples_per_second": 396.904, "eval_steps_per_second": 49.672, "step": 73000 },
    { "epoch": 3.01, "learning_rate": 2.0545833333333335e-06, "loss": 1.0222, "step": 74000 },
    { "epoch": 3.01, "eval_accuracy": 0.501996007984032, "eval_loss": 0.9927905201911926, "eval_runtime": 12.7924, "eval_samples_per_second": 391.64, "eval_steps_per_second": 49.014, "step": 74000 },
    { "epoch": 3.06, "learning_rate": 2.0823333333333334e-06, "loss": 1.0198, "step": 75000 },
    { "epoch": 3.06, "eval_accuracy": 0.5025948103792415, "eval_loss": 0.9925487041473389, "eval_runtime": 12.6297, "eval_samples_per_second": 396.684, "eval_steps_per_second": 49.645, "step": 75000 },
    { "epoch": 3.1, "learning_rate": 2.1101111111111113e-06, "loss": 1.0203, "step": 76000 },
    { "epoch": 3.1, "eval_accuracy": 0.5027944111776447, "eval_loss": 0.9909006953239441, "eval_runtime": 12.4265, "eval_samples_per_second": 403.17, "eval_steps_per_second": 50.457, "step": 76000 },
    { "epoch": 3.14, "learning_rate": 2.137861111111111e-06, "loss": 1.022, "step": 77000 },
    { "epoch": 3.14, "eval_accuracy": 0.48303393213572854, "eval_loss": 1.010209083557129, "eval_runtime": 12.3999, "eval_samples_per_second": 404.037, "eval_steps_per_second": 50.565, "step": 77000 },
    { "epoch": 3.18, "learning_rate": 2.1656388888888888e-06, "loss": 1.0133, "step": 78000 },
    { "epoch": 3.18, "eval_accuracy": 0.5075848303393213, "eval_loss": 0.9907119274139404, "eval_runtime": 12.4429, "eval_samples_per_second": 402.64, "eval_steps_per_second": 50.39, "step": 78000 },
    { "epoch": 3.22, "learning_rate": 2.193388888888889e-06, "loss": 1.0207, "step": 79000 },
    { "epoch": 3.22, "eval_accuracy": 0.4944111776447106, "eval_loss": 1.0015305280685425, "eval_runtime": 12.7143, "eval_samples_per_second": 394.046, "eval_steps_per_second": 49.315, "step": 79000 },
    { "epoch": 3.26, "learning_rate": 2.221166666666667e-06, "loss": 1.0172, "step": 80000 },
    { "epoch": 3.26, "eval_accuracy": 0.49540918163672654, "eval_loss": 0.9991635680198669, "eval_runtime": 12.7122, "eval_samples_per_second": 394.109, "eval_steps_per_second": 49.323, "step": 80000 },
    { "epoch": 3.3, "learning_rate": 2.248916666666667e-06, "loss": 1.0185, "step": 81000 },
    { "epoch": 3.3, "eval_accuracy": 0.49001996007984033, "eval_loss": 1.0026898384094238, "eval_runtime": 12.6101, "eval_samples_per_second": 397.299, "eval_steps_per_second": 49.722, "step": 81000 },
    { "epoch": 3.34, "learning_rate": 2.2766944444444444e-06, "loss": 1.0287, "step": 82000 },
    { "epoch": 3.34, "eval_accuracy": 0.4998003992015968, "eval_loss": 0.9921432733535767, "eval_runtime": 12.4639, "eval_samples_per_second": 401.961, "eval_steps_per_second": 50.305, "step": 82000 },
    { "epoch": 3.38, "learning_rate": 2.3044444444444443e-06, "loss": 1.0142, "step": 83000 },
    { "epoch": 3.38, "eval_accuracy": 0.5017964071856288, "eval_loss": 0.9938461184501648, "eval_runtime": 12.4125, "eval_samples_per_second": 403.625, "eval_steps_per_second": 50.514, "step": 83000 },
    { "epoch": 3.42, "learning_rate": 2.3322222222222223e-06, "loss": 1.0186, "step": 84000 },
    { "epoch": 3.42, "eval_accuracy": 0.5089820359281437, "eval_loss": 0.9863815307617188, "eval_runtime": 12.6556, "eval_samples_per_second": 395.872, "eval_steps_per_second": 49.543, "step": 84000 },
    { "epoch": 3.46, "learning_rate": 2.3599722222222226e-06, "loss": 1.0151, "step": 85000 },
    { "epoch": 3.46, "eval_accuracy": 0.5041916167664671, "eval_loss": 0.9891662001609802, "eval_runtime": 12.6529, "eval_samples_per_second": 395.958, "eval_steps_per_second": 49.554, "step": 85000 },
    { "epoch": 3.5, "learning_rate": 2.38775e-06, "loss": 1.0268, "step": 86000 },
    { "epoch": 3.5, "eval_accuracy": 0.5043912175648703, "eval_loss": 0.9857804179191589, "eval_runtime": 12.4197, "eval_samples_per_second": 403.391, "eval_steps_per_second": 50.484, "step": 86000 },
    { "epoch": 3.54, "learning_rate": 2.4155e-06, "loss": 1.0152, "step": 87000 },
    { "epoch": 3.54, "eval_accuracy": 0.5075848303393213, "eval_loss": 0.9841725826263428, "eval_runtime": 12.4152, "eval_samples_per_second": 403.538, "eval_steps_per_second": 50.503, "step": 87000 },
    { "epoch": 3.59, "learning_rate": 2.443277777777778e-06, "loss": 1.0201, "step": 88000 },
    { "epoch": 3.59, "eval_accuracy": 0.5065868263473053, "eval_loss": 0.9841102957725525, "eval_runtime": 12.4716, "eval_samples_per_second": 401.711, "eval_steps_per_second": 50.274, "step": 88000 },
    { "epoch": 3.63, "learning_rate": 2.471027777777778e-06, "loss": 1.0113, "step": 89000 },
    { "epoch": 3.63, "eval_accuracy": 0.5079840319361277, "eval_loss": 0.9851917028427124, "eval_runtime": 12.6634, "eval_samples_per_second": 395.627, "eval_steps_per_second": 49.513, "step": 89000 },
    { "epoch": 3.67, "learning_rate": 2.498805555555556e-06, "loss": 1.0195, "step": 90000 },
    { "epoch": 3.67, "eval_accuracy": 0.5039920159680639, "eval_loss": 0.9855921864509583, "eval_runtime": 12.6226, "eval_samples_per_second": 396.908, "eval_steps_per_second": 49.673, "step": 90000 },
    { "epoch": 3.71, "learning_rate": 2.5265555555555557e-06, "loss": 1.0202, "step": 91000 },
    { "epoch": 3.71, "eval_accuracy": 0.5001996007984032, "eval_loss": 0.9933446645736694, "eval_runtime": 12.4993, "eval_samples_per_second": 400.821, "eval_steps_per_second": 50.163, "step": 91000 },
    { "epoch": 3.75, "learning_rate": 2.5543333333333337e-06, "loss": 1.0183, "step": 92000 },
    { "epoch": 3.75, "eval_accuracy": 0.5, "eval_loss": 0.9928790926933289, "eval_runtime": 12.4539, "eval_samples_per_second": 402.285, "eval_steps_per_second": 50.346, "step": 92000 },
    { "epoch": 3.79, "learning_rate": 2.5820833333333335e-06, "loss": 1.02, "step": 93000 },
    { "epoch": 3.79, "eval_accuracy": 0.5011976047904192, "eval_loss": 0.9867545962333679, "eval_runtime": 12.4054, "eval_samples_per_second": 403.855, "eval_steps_per_second": 50.542, "step": 93000 },
    { "epoch": 3.83, "learning_rate": 2.609861111111111e-06, "loss": 1.0144, "step": 94000 },
    { "epoch": 3.83, "eval_accuracy": 0.5027944111776447, "eval_loss": 0.9887515902519226, "eval_runtime": 12.8101, "eval_samples_per_second": 391.097, "eval_steps_per_second": 48.946, "step": 94000 },
    { "epoch": 3.87, "learning_rate": 2.6376111111111114e-06, "loss": 1.0166, "step": 95000 },
    { "epoch": 3.87, "eval_accuracy": 0.5077844311377245, "eval_loss": 0.9852330684661865, "eval_runtime": 12.6394, "eval_samples_per_second": 396.379, "eval_steps_per_second": 49.607, "step": 95000 },
    { "epoch": 3.91, "learning_rate": 2.6653888888888894e-06, "loss": 1.0173, "step": 96000 },
    { "epoch": 3.91, "eval_accuracy": 0.5053892215568863, "eval_loss": 0.9848083257675171, "eval_runtime": 12.5421, "eval_samples_per_second": 399.456, "eval_steps_per_second": 49.992, "step": 96000 },
    { "epoch": 3.95, "learning_rate": 2.6931388888888892e-06, "loss": 1.0159, "step": 97000 },
    { "epoch": 3.95, "eval_accuracy": 0.5051896207584831, "eval_loss": 0.9930784702301025, "eval_runtime": 12.6498, "eval_samples_per_second": 396.054, "eval_steps_per_second": 49.566, "step": 97000 },
    { "epoch": 3.99, "learning_rate": 2.7209166666666668e-06, "loss": 1.0126, "step": 98000 },
    { "epoch": 3.99, "eval_accuracy": 0.5037924151696607, "eval_loss": 0.9865526556968689, "eval_runtime": 12.4969, "eval_samples_per_second": 400.899, "eval_steps_per_second": 50.172, "step": 98000 },
    { "epoch": 4.03, "learning_rate": 2.7486666666666666e-06, "loss": 1.0129, "step": 99000 },
    { "epoch": 4.03, "eval_accuracy": 0.5073852295409181, "eval_loss": 0.9890230298042297, "eval_runtime": 12.7759, "eval_samples_per_second": 392.143, "eval_steps_per_second": 49.077, "step": 99000 },
    { "epoch": 4.07, "learning_rate": 2.7764444444444446e-06, "loss": 1.0145, "step": 100000 },
    { "epoch": 4.07, "eval_accuracy": 0.506187624750499, "eval_loss": 0.9834555387496948, "eval_runtime": 12.6734, "eval_samples_per_second": 395.316, "eval_steps_per_second": 49.474, "step": 100000 },
    { "epoch": 4.12, "learning_rate": 2.804194444444445e-06, "loss": 1.0164, "step": 101000 },
    { "epoch": 4.12, "eval_accuracy": 0.5081836327345309, "eval_loss": 0.9881391525268555, "eval_runtime": 12.4161, "eval_samples_per_second": 403.509, "eval_steps_per_second": 50.499, "step": 101000 },
    { "epoch": 4.16, "learning_rate": 2.8319722222222225e-06, "loss": 1.0131, "step": 102000 },
    { "epoch": 4.16, "eval_accuracy": 0.5007984031936128, "eval_loss": 0.9866752624511719, "eval_runtime": 12.6211, "eval_samples_per_second": 396.953, "eval_steps_per_second": 49.679, "step": 102000 },
    { "epoch": 4.2, "learning_rate": 2.8597222222222223e-06, "loss": 1.0172, "step": 103000 },
    { "epoch": 4.2, "eval_accuracy": 0.5117764471057884, "eval_loss": 0.9857889413833618, "eval_runtime": 12.4246, "eval_samples_per_second": 403.233, "eval_steps_per_second": 50.464, "step": 103000 },
    { "epoch": 4.24, "learning_rate": 2.8875000000000003e-06, "loss": 1.0118, "step": 104000 },
    { "epoch": 4.24, "eval_accuracy": 0.501996007984032, "eval_loss": 0.9917896389961243, "eval_runtime": 12.7707, "eval_samples_per_second": 392.304, "eval_steps_per_second": 49.097, "step": 104000 },
    { "epoch": 4.28, "learning_rate": 2.91525e-06, "loss": 1.0082, "step": 105000 },
    { "epoch": 4.28, "eval_accuracy": 0.4940119760479042, "eval_loss": 1.0039118528366089, "eval_runtime": 12.7128, "eval_samples_per_second": 394.092, "eval_steps_per_second": 49.32, "step": 105000 },
    { "epoch": 4.32, "learning_rate": 2.9430277777777777e-06, "loss": 1.006, "step": 106000 },
    { "epoch": 4.32, "eval_accuracy": 0.5023952095808383, "eval_loss": 0.9883149862289429, "eval_runtime": 12.7217, "eval_samples_per_second": 393.814, "eval_steps_per_second": 49.286, "step": 106000 },
    { "epoch": 4.36, "learning_rate": 2.970777777777778e-06, "loss": 1.008, "step": 107000 },
    { "epoch": 4.36, "eval_accuracy": 0.5023952095808383, "eval_loss": 0.9926295876502991, "eval_runtime": 12.4187, "eval_samples_per_second": 403.424, "eval_steps_per_second": 50.488, "step": 107000 },
    { "epoch": 4.4, "learning_rate": 2.998555555555556e-06, "loss": 1.0078, "step": 108000 },
    { "epoch": 4.4, "eval_accuracy": 0.5069860279441117, "eval_loss": 0.9883684515953064, "eval_runtime": 12.4562, "eval_samples_per_second": 402.21, "eval_steps_per_second": 50.336, "step": 108000 },
    { "epoch": 4.44, "learning_rate": 3.026305555555556e-06, "loss": 1.0052, "step": 109000 },
    { "epoch": 4.44, "eval_accuracy": 0.5083832335329341, "eval_loss": 0.9893190264701843, "eval_runtime": 12.4193, "eval_samples_per_second": 403.404, "eval_steps_per_second": 50.486, "step": 109000 },
    { "epoch": 4.48, "learning_rate": 3.0540833333333334e-06, "loss": 1.0102, "step": 110000 },
    { "epoch": 4.48, "eval_accuracy": 0.5059880239520959, "eval_loss": 0.9876956939697266, "eval_runtime": 12.7204, "eval_samples_per_second": 393.854, "eval_steps_per_second": 49.291, "step": 110000 },
    { "epoch": 4.52, "learning_rate": 3.0818333333333333e-06, "loss": 1.004, "step": 111000 },
    { "epoch": 4.52, "eval_accuracy": 0.5099800399201597, "eval_loss": 0.9822403788566589, "eval_runtime": 12.7671, "eval_samples_per_second": 392.416, "eval_steps_per_second": 49.111, "step": 111000 },
    { "epoch": 4.56, "learning_rate": 3.1096111111111113e-06, "loss": 1.016, "step": 112000 },
    { "epoch": 4.56, "eval_accuracy": 0.5075848303393213, "eval_loss": 0.9813176393508911, "eval_runtime": 12.5425, "eval_samples_per_second": 399.442, "eval_steps_per_second": 49.99, "step": 112000 },
    { "epoch": 4.6, "learning_rate": 3.137361111111111e-06, "loss": 1.0024, "step": 113000 },
    { "epoch": 4.6, "eval_accuracy": 0.5, "eval_loss": 0.9851263165473938, "eval_runtime": 12.718, "eval_samples_per_second": 393.931, "eval_steps_per_second": 49.3, "step": 113000 },
    { "epoch": 4.64, "learning_rate": 3.165138888888889e-06, "loss": 1.0096, "step": 114000 },
    { "epoch": 4.64, "eval_accuracy": 0.5101796407185629, "eval_loss": 0.9791144132614136, "eval_runtime": 12.4876, "eval_samples_per_second": 401.199, "eval_steps_per_second": 50.21, "step": 114000 },
    { "epoch": 4.69, "learning_rate": 3.192888888888889e-06, "loss": 1.0111, "step": 115000 },
    { "epoch": 4.69, "eval_accuracy": 0.5131736526946108, "eval_loss": 0.9794335961341858, "eval_runtime": 12.6803, "eval_samples_per_second": 395.101, "eval_steps_per_second": 49.447, "step": 115000 },
    { "epoch": 4.73, "learning_rate": 3.220666666666667e-06, "loss": 1.005, "step": 116000 },
    { "epoch": 4.73, "eval_accuracy": 0.506187624750499, "eval_loss": 0.9822920560836792, "eval_runtime": 12.7349, "eval_samples_per_second": 393.408, "eval_steps_per_second": 49.235, "step": 116000 },
    { "epoch": 4.77, "learning_rate": 3.248416666666667e-06, "loss": 1.0116, "step": 117000 },
    { "epoch": 4.77, "eval_accuracy": 0.5073852295409181, "eval_loss": 0.9905573725700378, "eval_runtime": 12.5859, "eval_samples_per_second": 398.064, "eval_steps_per_second": 49.818, "step": 117000 },
    { "epoch": 4.81, "learning_rate": 3.276194444444445e-06, "loss": 1.0108, "step": 118000 },
    { "epoch": 4.81, "eval_accuracy": 0.5121756487025948, "eval_loss": 0.9814850687980652, "eval_runtime": 12.5991, "eval_samples_per_second": 397.649, "eval_steps_per_second": 49.766, "step": 118000 },
    { "epoch": 4.85, "learning_rate": 3.303944444444445e-06, "loss": 1.0059, "step": 119000 },
    { "epoch": 4.85, "eval_accuracy": 0.5135728542914172, "eval_loss": 0.9780199527740479, "eval_runtime": 12.4558, "eval_samples_per_second": 402.221, "eval_steps_per_second": 50.338, "step": 119000 },
    { "epoch": 4.89, "learning_rate": 3.331722222222222e-06, "loss": 1.0135, "step": 120000 },
    { "epoch": 4.89, "eval_accuracy": 0.5039920159680639, "eval_loss": 0.9968580603599548, "eval_runtime": 12.6517, "eval_samples_per_second": 395.994, "eval_steps_per_second": 49.558, "step": 120000 },
    { "epoch": 4.93, "learning_rate": 3.3595e-06, "loss": 1.0023, "step": 121000 },
    { "epoch": 4.93, "eval_accuracy": 0.512375249500998, "eval_loss": 0.9820229411125183, "eval_runtime": 12.6842, "eval_samples_per_second": 394.979, "eval_steps_per_second": 49.431, "step": 121000 },
    { "epoch": 4.97, "learning_rate": 3.38725e-06, "loss": 1.0085, "step": 122000 },
    { "epoch": 4.97, "eval_accuracy": 0.5117764471057884, "eval_loss": 0.9823063611984253, "eval_runtime": 12.7386, "eval_samples_per_second": 393.293, "eval_steps_per_second": 49.22, "step": 122000 },
    { "epoch": 5.01, "learning_rate": 3.415027777777778e-06, "loss": 1.0076, "step": 123000 },
    { "epoch": 5.01, "eval_accuracy": 0.500998003992016, "eval_loss": 0.9879907369613647, "eval_runtime": 12.4926, "eval_samples_per_second": 401.037, "eval_steps_per_second": 50.19, "step": 123000 },
    { "epoch": 5.05, "learning_rate": 3.442777777777778e-06, "loss": 1.0005, "step": 124000 },
    { "epoch": 5.05, "eval_accuracy": 0.5093812375249501, "eval_loss": 0.9808560609817505, "eval_runtime": 12.4952, "eval_samples_per_second": 400.955, "eval_steps_per_second": 50.179, "step": 124000 },
    { "epoch": 5.09, "learning_rate": 3.470555555555556e-06, "loss": 1.0048, "step": 125000 },
    { "epoch": 5.09, "eval_accuracy": 0.5099800399201597, "eval_loss": 0.981860876083374, "eval_runtime": 12.4425, "eval_samples_per_second": 402.652, "eval_steps_per_second": 50.392, "step": 125000 },
    { "epoch": 5.13, "learning_rate": 3.4983333333333334e-06, "loss": 1.0049, "step": 126000 },
    { "epoch": 5.13, "eval_accuracy": 0.5105788423153692, "eval_loss": 0.9849931001663208, "eval_runtime": 12.6135, "eval_samples_per_second": 397.192, "eval_steps_per_second": 49.708, "step": 126000 },
    { "epoch": 5.17, "learning_rate": 3.5261111111111114e-06, "loss": 1.0056, "step": 127000 },
    { "epoch": 5.17, "eval_accuracy": 0.5049900199600799, "eval_loss": 0.9838124513626099, "eval_runtime": 12.4113, "eval_samples_per_second": 403.663, "eval_steps_per_second": 50.518, "step": 127000 },
    { "epoch": 5.22, "learning_rate": 3.5538611111111113e-06, "loss": 0.9995, "step": 128000 },
    { "epoch": 5.22, "eval_accuracy": 0.5099800399201597, "eval_loss": 0.9798722863197327, "eval_runtime": 12.3902, "eval_samples_per_second": 404.351, "eval_steps_per_second": 50.604, "step": 128000 },
    { "epoch": 5.26, "learning_rate": 3.5816388888888893e-06, "loss": 1.003, "step": 129000 },
    { "epoch": 5.26, "eval_accuracy": 0.5055888223552895, "eval_loss": 0.9836646914482117, "eval_runtime": 12.3384, "eval_samples_per_second": 406.05, "eval_steps_per_second": 50.817, "step": 129000 },
    { "epoch": 5.3, "learning_rate": 3.609388888888889e-06, "loss": 0.9956, "step": 130000 },
    { "epoch": 5.3, "eval_accuracy": 0.5079840319361277, "eval_loss": 0.9859731793403625, "eval_runtime": 12.6442, "eval_samples_per_second": 396.23, "eval_steps_per_second": 49.588, "step": 130000 },
    { "epoch": 5.34, "learning_rate": 3.6371666666666667e-06, "loss": 0.9989, "step": 131000 },
    { "epoch": 5.34, "eval_accuracy": 0.5121756487025948, "eval_loss": 0.9777482151985168, "eval_runtime": 12.5026, "eval_samples_per_second": 400.716, "eval_steps_per_second": 50.149, "step": 131000 },
    { "epoch": 5.38, "learning_rate": 3.6649166666666665e-06, "loss": 0.9992, "step": 132000 },
    { "epoch": 5.38, "eval_accuracy": 0.5131736526946108, "eval_loss": 0.9786902666091919, "eval_runtime": 12.4113, "eval_samples_per_second": 403.664, "eval_steps_per_second": 50.518, "step": 132000 },
    { "epoch": 5.42, "learning_rate": 3.6926666666666673e-06, "loss": 1.0033, "step": 133000 },
    { "epoch": 5.42, "eval_accuracy": 0.5103792415169661, "eval_loss": 0.9823509454727173, "eval_runtime": 12.3681, "eval_samples_per_second": 405.073, "eval_steps_per_second": 50.695, "step": 133000 },
    { "epoch": 5.46, "learning_rate": 3.7204444444444444e-06, "loss": 1.001, "step": 134000 },
    { "epoch": 5.46, "eval_accuracy": 0.5055888223552895, "eval_loss": 0.980103075504303, "eval_runtime": 12.658, "eval_samples_per_second": 395.796, "eval_steps_per_second": 49.534, "step": 134000 },
    { "epoch": 5.5, "learning_rate": 3.7482222222222224e-06, "loss": 1.0074, "step": 135000 },
    { "epoch": 5.5, "eval_accuracy": 0.5067864271457085, "eval_loss": 0.9919066429138184, "eval_runtime": 12.669, "eval_samples_per_second": 395.452, "eval_steps_per_second": 49.491, "step": 135000 },
    { "epoch": 5.54, "learning_rate": 3.7760000000000004e-06, "loss": 1.0056, "step": 136000 },
    { "epoch": 5.54, "eval_accuracy": 0.5117764471057884, "eval_loss": 0.9795469641685486, "eval_runtime": 12.3923, "eval_samples_per_second": 404.284, "eval_steps_per_second": 50.596, "step": 136000 },
    { "epoch": 5.58, "learning_rate": 3.8037500000000002e-06, "loss": 1.0029, "step": 137000 },
    { "epoch": 5.58, "eval_accuracy": 0.5101796407185629, "eval_loss": 0.9805279970169067, "eval_runtime": 12.4005, "eval_samples_per_second": 404.015, "eval_steps_per_second": 50.562, "step": 137000 },
    { "epoch": 5.62, "learning_rate": 3.831527777777778e-06, "loss": 1.0018, "step": 138000 },
    { "epoch": 5.62, "eval_accuracy": 0.5015968063872256, "eval_loss": 0.9934979677200317, "eval_runtime": 12.6438, "eval_samples_per_second": 396.24, "eval_steps_per_second": 49.589, "step": 138000 },
    { "epoch": 5.66, "learning_rate": 3.859277777777778e-06, "loss": 0.9985, "step": 139000 },
    { "epoch": 5.66, "eval_accuracy": 0.5083832335329341, "eval_loss": 0.984520435333252, "eval_runtime": 12.6399, "eval_samples_per_second": 396.364, "eval_steps_per_second": 49.605, "step": 139000 },
    { "epoch": 5.7, "learning_rate": 3.887055555555556e-06, "loss": 1.0007, "step": 140000 },
    { "epoch": 5.7, "eval_accuracy": 0.5037924151696607, "eval_loss": 0.9855640530586243, "eval_runtime": 12.4087, "eval_samples_per_second": 403.748, "eval_steps_per_second": 50.529, "step": 140000 }
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 4.878134129590272e+16,
  "trial_name": null,
  "trial_params": null
}