{
  "best_metric": 0.8478260869565217,
  "best_model_checkpoint": "vit-base-patch16-224-ve-U13b-80RX\\checkpoint-103",
  "epoch": 39.61165048543689,
  "eval_steps": 500,
  "global_step": 680,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.58,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 1.3857,
      "step": 10
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.5652173913043478,
      "eval_loss": 1.370294451713562,
      "eval_runtime": 0.8127,
      "eval_samples_per_second": 56.602,
      "eval_steps_per_second": 7.383,
      "step": 17
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.235294117647059e-05,
      "loss": 1.3738,
      "step": 20
    },
    {
      "epoch": 1.75,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 1.3134,
      "step": 30
    },
    {
      "epoch": 1.98,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.2235466241836548,
      "eval_runtime": 0.7992,
      "eval_samples_per_second": 57.558,
      "eval_steps_per_second": 7.508,
      "step": 34
    },
    {
      "epoch": 2.33,
      "learning_rate": 5.448916408668731e-05,
      "loss": 1.1875,
      "step": 40
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.363777089783282e-05,
      "loss": 1.0384,
      "step": 50
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.5434782608695652,
      "eval_loss": 1.0173379182815552,
      "eval_runtime": 0.8032,
      "eval_samples_per_second": 57.271,
      "eval_steps_per_second": 7.47,
      "step": 51
    },
    {
      "epoch": 3.5,
      "learning_rate": 5.278637770897833e-05,
      "loss": 0.908,
      "step": 60
    },
    {
      "epoch": 3.96,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8346009254455566,
      "eval_runtime": 0.7872,
      "eval_samples_per_second": 58.435,
      "eval_steps_per_second": 7.622,
      "step": 68
    },
    {
      "epoch": 4.08,
      "learning_rate": 5.193498452012384e-05,
      "loss": 0.8671,
      "step": 70
    },
    {
      "epoch": 4.66,
      "learning_rate": 5.108359133126935e-05,
      "loss": 0.75,
      "step": 80
    },
    {
      "epoch": 4.95,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.7342777252197266,
      "eval_runtime": 0.8302,
      "eval_samples_per_second": 55.408,
      "eval_steps_per_second": 7.227,
      "step": 85
    },
    {
      "epoch": 5.24,
      "learning_rate": 5.023219814241486e-05,
      "loss": 0.6277,
      "step": 90
    },
    {
      "epoch": 5.83,
      "learning_rate": 4.9380804953560375e-05,
      "loss": 0.5131,
      "step": 100
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8478260869565217,
      "eval_loss": 0.6099294424057007,
      "eval_runtime": 0.8737,
      "eval_samples_per_second": 52.649,
      "eval_steps_per_second": 6.867,
      "step": 103
    },
    {
      "epoch": 6.41,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 0.441,
      "step": 110
    },
    {
      "epoch": 6.99,
      "learning_rate": 4.7678018575851394e-05,
      "loss": 0.395,
      "step": 120
    },
    {
      "epoch": 6.99,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.5931932926177979,
      "eval_runtime": 0.8642,
      "eval_samples_per_second": 53.227,
      "eval_steps_per_second": 6.943,
      "step": 120
    },
    {
      "epoch": 7.57,
      "learning_rate": 4.6826625386996904e-05,
      "loss": 0.355,
      "step": 130
    },
    {
      "epoch": 7.98,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.7208946943283081,
      "eval_runtime": 0.8322,
      "eval_samples_per_second": 55.275,
      "eval_steps_per_second": 7.21,
      "step": 137
    },
    {
      "epoch": 8.16,
      "learning_rate": 4.597523219814241e-05,
      "loss": 0.339,
      "step": 140
    },
    {
      "epoch": 8.74,
      "learning_rate": 4.512383900928793e-05,
      "loss": 0.2658,
      "step": 150
    },
    {
      "epoch": 8.97,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.5652452707290649,
      "eval_runtime": 0.7862,
      "eval_samples_per_second": 58.511,
      "eval_steps_per_second": 7.632,
      "step": 154
    },
    {
      "epoch": 9.32,
      "learning_rate": 4.427244582043344e-05,
      "loss": 0.2305,
      "step": 160
    },
    {
      "epoch": 9.9,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.248,
      "step": 170
    },
    {
      "epoch": 9.96,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.7103272080421448,
      "eval_runtime": 0.8142,
      "eval_samples_per_second": 56.498,
      "eval_steps_per_second": 7.369,
      "step": 171
    },
    {
      "epoch": 10.49,
      "learning_rate": 4.2569659442724465e-05,
      "loss": 0.2086,
      "step": 180
    },
    {
      "epoch": 10.95,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.6788336038589478,
      "eval_runtime": 0.8397,
      "eval_samples_per_second": 54.782,
      "eval_steps_per_second": 7.145,
      "step": 188
    },
    {
      "epoch": 11.07,
      "learning_rate": 4.171826625386997e-05,
      "loss": 0.2348,
      "step": 190
    },
    {
      "epoch": 11.65,
      "learning_rate": 4.0866873065015484e-05,
      "loss": 0.1532,
      "step": 200
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.5725099444389343,
      "eval_runtime": 0.8672,
      "eval_samples_per_second": 53.045,
      "eval_steps_per_second": 6.919,
      "step": 206
    },
    {
      "epoch": 12.23,
      "learning_rate": 4.001547987616099e-05,
      "loss": 0.161,
      "step": 210
    },
    {
      "epoch": 12.82,
      "learning_rate": 3.91640866873065e-05,
      "loss": 0.147,
      "step": 220
    },
    {
      "epoch": 12.99,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6129825115203857,
      "eval_runtime": 0.7782,
      "eval_samples_per_second": 59.112,
      "eval_steps_per_second": 7.71,
      "step": 223
    },
    {
      "epoch": 13.4,
      "learning_rate": 3.831269349845202e-05,
      "loss": 0.1096,
      "step": 230
    },
    {
      "epoch": 13.98,
      "learning_rate": 3.746130030959752e-05,
      "loss": 0.1145,
      "step": 240
    },
    {
      "epoch": 13.98,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.656341552734375,
      "eval_runtime": 0.7982,
      "eval_samples_per_second": 57.63,
      "eval_steps_per_second": 7.517,
      "step": 240
    },
    {
      "epoch": 14.56,
      "learning_rate": 3.660990712074304e-05,
      "loss": 0.1053,
      "step": 250
    },
    {
      "epoch": 14.97,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.5992771983146667,
      "eval_runtime": 0.7932,
      "eval_samples_per_second": 57.993,
      "eval_steps_per_second": 7.564,
      "step": 257
    },
    {
      "epoch": 15.15,
      "learning_rate": 3.575851393188854e-05,
      "loss": 0.1246,
      "step": 260
    },
    {
      "epoch": 15.73,
      "learning_rate": 3.490712074303406e-05,
      "loss": 0.0971,
      "step": 270
    },
    {
      "epoch": 15.96,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.8839967250823975,
      "eval_runtime": 0.8222,
      "eval_samples_per_second": 55.947,
      "eval_steps_per_second": 7.297,
      "step": 274
    },
    {
      "epoch": 16.31,
      "learning_rate": 3.405572755417957e-05,
      "loss": 0.1263,
      "step": 280
    },
    {
      "epoch": 16.89,
      "learning_rate": 3.3204334365325076e-05,
      "loss": 0.0947,
      "step": 290
    },
    {
      "epoch": 16.95,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6255514025688171,
      "eval_runtime": 0.8982,
      "eval_samples_per_second": 51.213,
      "eval_steps_per_second": 6.68,
      "step": 291
    },
    {
      "epoch": 17.48,
      "learning_rate": 3.235294117647059e-05,
      "loss": 0.1055,
      "step": 300
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.8406044244766235,
      "eval_runtime": 0.8512,
      "eval_samples_per_second": 54.041,
      "eval_steps_per_second": 7.049,
      "step": 309
    },
    {
      "epoch": 18.06,
      "learning_rate": 3.1501547987616095e-05,
      "loss": 0.1069,
      "step": 310
    },
    {
      "epoch": 18.64,
      "learning_rate": 3.065015479876161e-05,
      "loss": 0.0974,
      "step": 320
    },
    {
      "epoch": 18.99,
      "eval_accuracy": 0.8478260869565217,
      "eval_loss": 0.6354570984840393,
      "eval_runtime": 0.9072,
      "eval_samples_per_second": 50.705,
      "eval_steps_per_second": 6.614,
      "step": 326
    },
    {
      "epoch": 19.22,
      "learning_rate": 2.9798761609907124e-05,
      "loss": 0.1043,
      "step": 330
    },
    {
      "epoch": 19.81,
      "learning_rate": 2.894736842105263e-05,
      "loss": 0.1215,
      "step": 340
    },
    {
      "epoch": 19.98,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6650977730751038,
      "eval_runtime": 0.7947,
      "eval_samples_per_second": 57.885,
      "eval_steps_per_second": 7.55,
      "step": 343
    },
    {
      "epoch": 20.39,
      "learning_rate": 2.8095975232198143e-05,
      "loss": 0.0847,
      "step": 350
    },
    {
      "epoch": 20.97,
      "learning_rate": 2.7244582043343656e-05,
      "loss": 0.108,
      "step": 360
    },
    {
      "epoch": 20.97,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8301287889480591,
      "eval_runtime": 0.8602,
      "eval_samples_per_second": 53.476,
      "eval_steps_per_second": 6.975,
      "step": 360
    },
    {
      "epoch": 21.55,
      "learning_rate": 2.6393188854489165e-05,
      "loss": 0.0784,
      "step": 370
    },
    {
      "epoch": 21.96,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.8837165832519531,
      "eval_runtime": 0.8957,
      "eval_samples_per_second": 51.356,
      "eval_steps_per_second": 6.699,
      "step": 377
    },
    {
      "epoch": 22.14,
      "learning_rate": 2.5541795665634675e-05,
      "loss": 0.0725,
      "step": 380
    },
    {
      "epoch": 22.72,
      "learning_rate": 2.4690402476780188e-05,
      "loss": 0.0919,
      "step": 390
    },
    {
      "epoch": 22.95,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6985116004943848,
      "eval_runtime": 0.8562,
      "eval_samples_per_second": 53.726,
      "eval_steps_per_second": 7.008,
      "step": 394
    },
    {
      "epoch": 23.3,
      "learning_rate": 2.3839009287925697e-05,
      "loss": 0.0696,
      "step": 400
    },
    {
      "epoch": 23.88,
      "learning_rate": 2.2987616099071207e-05,
      "loss": 0.064,
      "step": 410
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6426486968994141,
      "eval_runtime": 0.9203,
      "eval_samples_per_second": 49.983,
      "eval_steps_per_second": 6.519,
      "step": 412
    },
    {
      "epoch": 24.47,
      "learning_rate": 2.213622291021672e-05,
      "loss": 0.0669,
      "step": 420
    },
    {
      "epoch": 24.99,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8101781010627747,
      "eval_runtime": 0.8456,
      "eval_samples_per_second": 54.397,
      "eval_steps_per_second": 7.095,
      "step": 429
    },
    {
      "epoch": 25.05,
      "learning_rate": 2.1284829721362232e-05,
      "loss": 0.0994,
      "step": 430
    },
    {
      "epoch": 25.63,
      "learning_rate": 2.0433436532507742e-05,
      "loss": 0.0878,
      "step": 440
    },
    {
      "epoch": 25.98,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.7863481640815735,
      "eval_runtime": 0.8658,
      "eval_samples_per_second": 53.128,
      "eval_steps_per_second": 6.93,
      "step": 446
    },
    {
      "epoch": 26.21,
      "learning_rate": 1.958204334365325e-05,
      "loss": 0.0725,
      "step": 450
    },
    {
      "epoch": 26.8,
      "learning_rate": 1.873065015479876e-05,
      "loss": 0.0875,
      "step": 460
    },
    {
      "epoch": 26.97,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.8777241706848145,
      "eval_runtime": 0.8092,
      "eval_samples_per_second": 56.848,
      "eval_steps_per_second": 7.415,
      "step": 463
    },
    {
      "epoch": 27.38,
      "learning_rate": 1.787925696594427e-05,
      "loss": 0.0641,
      "step": 470
    },
    {
      "epoch": 27.96,
      "learning_rate": 1.7027863777089787e-05,
      "loss": 0.0441,
      "step": 480
    },
    {
      "epoch": 27.96,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.7324451208114624,
      "eval_runtime": 0.8494,
      "eval_samples_per_second": 54.159,
      "eval_steps_per_second": 7.064,
      "step": 480
    },
    {
      "epoch": 28.54,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.088,
      "step": 490
    },
    {
      "epoch": 28.95,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8099210858345032,
      "eval_runtime": 0.8292,
      "eval_samples_per_second": 55.476,
      "eval_steps_per_second": 7.236,
      "step": 497
    },
    {
      "epoch": 29.13,
      "learning_rate": 1.5325077399380806e-05,
      "loss": 0.0691,
      "step": 500
    },
    {
      "epoch": 29.71,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 0.0739,
      "step": 510
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.7775710821151733,
      "eval_runtime": 0.8582,
      "eval_samples_per_second": 53.601,
      "eval_steps_per_second": 6.991,
      "step": 515
    },
    {
      "epoch": 30.29,
      "learning_rate": 1.3622291021671828e-05,
      "loss": 0.0447,
      "step": 520
    },
    {
      "epoch": 30.87,
      "learning_rate": 1.2770897832817337e-05,
      "loss": 0.0598,
      "step": 530
    },
    {
      "epoch": 30.99,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8187718391418457,
      "eval_runtime": 0.8752,
      "eval_samples_per_second": 52.558,
      "eval_steps_per_second": 6.855,
      "step": 532
    },
    {
      "epoch": 31.46,
      "learning_rate": 1.1919504643962849e-05,
      "loss": 0.0443,
      "step": 540
    },
    {
      "epoch": 31.98,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8549073934555054,
      "eval_runtime": 0.9272,
      "eval_samples_per_second": 49.611,
      "eval_steps_per_second": 6.471,
      "step": 549
    },
    {
      "epoch": 32.04,
      "learning_rate": 1.106811145510836e-05,
      "loss": 0.0577,
      "step": 550
    },
    {
      "epoch": 32.62,
      "learning_rate": 1.0216718266253871e-05,
      "loss": 0.0376,
      "step": 560
    },
    {
      "epoch": 32.97,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8048975467681885,
      "eval_runtime": 0.7812,
      "eval_samples_per_second": 58.886,
      "eval_steps_per_second": 7.681,
      "step": 566
    },
    {
      "epoch": 33.2,
      "learning_rate": 9.36532507739938e-06,
      "loss": 0.0406,
      "step": 570
    },
    {
      "epoch": 33.79,
      "learning_rate": 8.513931888544893e-06,
      "loss": 0.0375,
      "step": 580
    },
    {
      "epoch": 33.96,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8037001490592957,
      "eval_runtime": 0.8452,
      "eval_samples_per_second": 54.424,
      "eval_steps_per_second": 7.099,
      "step": 583
    },
    {
      "epoch": 34.37,
      "learning_rate": 7.662538699690403e-06,
      "loss": 0.0485,
      "step": 590
    },
    {
      "epoch": 34.95,
      "learning_rate": 6.811145510835914e-06,
      "loss": 0.0346,
      "step": 600
    },
    {
      "epoch": 34.95,
      "eval_accuracy": 0.8260869565217391,
      "eval_loss": 0.8255174160003662,
      "eval_runtime": 0.8432,
      "eval_samples_per_second": 54.555,
      "eval_steps_per_second": 7.116,
      "step": 600
    },
    {
      "epoch": 35.53,
      "learning_rate": 5.959752321981424e-06,
      "loss": 0.0471,
      "step": 610
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.823909342288971,
      "eval_runtime": 0.8722,
      "eval_samples_per_second": 52.74,
      "eval_steps_per_second": 6.879,
      "step": 618
    },
    {
      "epoch": 36.12,
      "learning_rate": 5.1083591331269355e-06,
      "loss": 0.0285,
      "step": 620
    },
    {
      "epoch": 36.7,
      "learning_rate": 4.256965944272447e-06,
      "loss": 0.0669,
      "step": 630
    },
    {
      "epoch": 36.99,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8187545537948608,
      "eval_runtime": 0.8572,
      "eval_samples_per_second": 53.663,
      "eval_steps_per_second": 6.999,
      "step": 635
    },
    {
      "epoch": 37.28,
      "learning_rate": 3.405572755417957e-06,
      "loss": 0.0386,
      "step": 640
    },
    {
      "epoch": 37.86,
      "learning_rate": 2.5541795665634677e-06,
      "loss": 0.0438,
      "step": 650
    },
    {
      "epoch": 37.98,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8443450927734375,
      "eval_runtime": 0.8317,
      "eval_samples_per_second": 55.309,
      "eval_steps_per_second": 7.214,
      "step": 652
    },
    {
      "epoch": 38.45,
      "learning_rate": 1.7027863777089785e-06,
      "loss": 0.0549,
      "step": 660
    },
    {
      "epoch": 38.97,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8550635576248169,
      "eval_runtime": 0.8407,
      "eval_samples_per_second": 54.716,
      "eval_steps_per_second": 7.137,
      "step": 669
    },
    {
      "epoch": 39.03,
      "learning_rate": 8.513931888544892e-07,
      "loss": 0.0368,
      "step": 670
    },
    {
      "epoch": 39.61,
      "learning_rate": 0.0,
      "loss": 0.0622,
      "step": 680
    },
    {
      "epoch": 39.61,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.8551087379455566,
      "eval_runtime": 0.7647,
      "eval_samples_per_second": 60.156,
      "eval_steps_per_second": 7.846,
      "step": 680
    },
    {
      "epoch": 39.61,
      "step": 680,
      "total_flos": 2.5142726714989363e+18,
      "train_loss": 0.24349691933568787,
      "train_runtime": 652.5132,
      "train_samples_per_second": 50.206,
      "train_steps_per_second": 1.042
    }
  ],
  "logging_steps": 10,
  "max_steps": 680,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 2.5142726714989363e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}