{
  "best_metric": 0.8333333333333334,
  "best_model_checkpoint": "vit-base-patch16-224-RU4-40\\checkpoint-134",
  "epoch": 39.48051948051948,
  "eval_steps": 500,
  "global_step": 760,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.52,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 1.3822,
      "step": 10
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.48333333333333334,
      "eval_loss": 1.3130170106887817,
      "eval_runtime": 1.2415,
      "eval_samples_per_second": 48.328,
      "eval_steps_per_second": 1.611,
      "step": 19
    },
    {
      "epoch": 1.04,
      "learning_rate": 2.894736842105263e-05,
      "loss": 1.3403,
      "step": 20
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.342105263157895e-05,
      "loss": 1.2724,
      "step": 30
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.6,
      "eval_loss": 1.0986911058425903,
      "eval_runtime": 1.2181,
      "eval_samples_per_second": 49.258,
      "eval_steps_per_second": 1.642,
      "step": 38
    },
    {
      "epoch": 2.08,
      "learning_rate": 5.484764542936288e-05,
      "loss": 1.1316,
      "step": 40
    },
    {
      "epoch": 2.6,
      "learning_rate": 5.408587257617729e-05,
      "loss": 0.9711,
      "step": 50
    },
    {
      "epoch": 2.96,
      "eval_accuracy": 0.6666666666666666,
      "eval_loss": 0.862390398979187,
      "eval_runtime": 1.0302,
      "eval_samples_per_second": 58.242,
      "eval_steps_per_second": 1.941,
      "step": 57
    },
    {
      "epoch": 3.12,
      "learning_rate": 5.332409972299169e-05,
      "loss": 0.7924,
      "step": 60
    },
    {
      "epoch": 3.64,
      "learning_rate": 5.256232686980609e-05,
      "loss": 0.6349,
      "step": 70
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 0.7396789789199829,
      "eval_runtime": 1.159,
      "eval_samples_per_second": 51.769,
      "eval_steps_per_second": 1.726,
      "step": 77
    },
    {
      "epoch": 4.16,
      "learning_rate": 5.18005540166205e-05,
      "loss": 0.5199,
      "step": 80
    },
    {
      "epoch": 4.68,
      "learning_rate": 5.1038781163434903e-05,
      "loss": 0.4068,
      "step": 90
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.75,
      "eval_loss": 0.6979052424430847,
      "eval_runtime": 1.165,
      "eval_samples_per_second": 51.502,
      "eval_steps_per_second": 1.717,
      "step": 96
    },
    {
      "epoch": 5.19,
      "learning_rate": 5.027700831024931e-05,
      "loss": 0.3676,
      "step": 100
    },
    {
      "epoch": 5.71,
      "learning_rate": 4.9515235457063714e-05,
      "loss": 0.2877,
      "step": 110
    },
    {
      "epoch": 5.97,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.6270049810409546,
      "eval_runtime": 1.0341,
      "eval_samples_per_second": 58.021,
      "eval_steps_per_second": 1.934,
      "step": 115
    },
    {
      "epoch": 6.23,
      "learning_rate": 4.8753462603878116e-05,
      "loss": 0.2292,
      "step": 120
    },
    {
      "epoch": 6.75,
      "learning_rate": 4.7991689750692524e-05,
      "loss": 0.2217,
      "step": 130
    },
    {
      "epoch": 6.96,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.6466542482376099,
      "eval_runtime": 1.0399,
      "eval_samples_per_second": 57.699,
      "eval_steps_per_second": 1.923,
      "step": 134
    },
    {
      "epoch": 7.27,
      "learning_rate": 4.7229916897506926e-05,
      "loss": 0.2464,
      "step": 140
    },
    {
      "epoch": 7.79,
      "learning_rate": 4.6468144044321335e-05,
      "loss": 0.195,
      "step": 150
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.685775876045227,
      "eval_runtime": 1.0519,
      "eval_samples_per_second": 57.037,
      "eval_steps_per_second": 1.901,
      "step": 154
    },
    {
      "epoch": 8.31,
      "learning_rate": 4.570637119113573e-05,
      "loss": 0.177,
      "step": 160
    },
    {
      "epoch": 8.83,
      "learning_rate": 4.494459833795014e-05,
      "loss": 0.1392,
      "step": 170
    },
    {
      "epoch": 8.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.650473415851593,
      "eval_runtime": 1.0115,
      "eval_samples_per_second": 59.32,
      "eval_steps_per_second": 1.977,
      "step": 173
    },
    {
      "epoch": 9.35,
      "learning_rate": 4.418282548476455e-05,
      "loss": 0.1695,
      "step": 180
    },
    {
      "epoch": 9.87,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.1534,
      "step": 190
    },
    {
      "epoch": 9.97,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.6320337057113647,
      "eval_runtime": 1.1254,
      "eval_samples_per_second": 53.313,
      "eval_steps_per_second": 1.777,
      "step": 192
    },
    {
      "epoch": 10.39,
      "learning_rate": 4.265927977839336e-05,
      "loss": 0.1318,
      "step": 200
    },
    {
      "epoch": 10.91,
      "learning_rate": 4.189750692520776e-05,
      "loss": 0.1136,
      "step": 210
    },
    {
      "epoch": 10.96,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.8345922827720642,
      "eval_runtime": 1.0135,
      "eval_samples_per_second": 59.201,
      "eval_steps_per_second": 1.973,
      "step": 211
    },
    {
      "epoch": 11.43,
      "learning_rate": 4.113573407202216e-05,
      "loss": 0.1195,
      "step": 220
    },
    {
      "epoch": 11.95,
      "learning_rate": 4.037396121883656e-05,
      "loss": 0.1025,
      "step": 230
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.6809566617012024,
      "eval_runtime": 1.0794,
      "eval_samples_per_second": 55.586,
      "eval_steps_per_second": 1.853,
      "step": 231
    },
    {
      "epoch": 12.47,
      "learning_rate": 3.961218836565097e-05,
      "loss": 0.1025,
      "step": 240
    },
    {
      "epoch": 12.99,
      "learning_rate": 3.885041551246538e-05,
      "loss": 0.0894,
      "step": 250
    },
    {
      "epoch": 12.99,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8257845640182495,
      "eval_runtime": 1.0081,
      "eval_samples_per_second": 59.515,
      "eval_steps_per_second": 1.984,
      "step": 250
    },
    {
      "epoch": 13.51,
      "learning_rate": 3.808864265927978e-05,
      "loss": 0.1308,
      "step": 260
    },
    {
      "epoch": 13.97,
      "eval_accuracy": 0.75,
      "eval_loss": 0.9456142783164978,
      "eval_runtime": 1.0373,
      "eval_samples_per_second": 57.843,
      "eval_steps_per_second": 1.928,
      "step": 269
    },
    {
      "epoch": 14.03,
      "learning_rate": 3.732686980609418e-05,
      "loss": 0.1002,
      "step": 270
    },
    {
      "epoch": 14.55,
      "learning_rate": 3.6565096952908585e-05,
      "loss": 0.0836,
      "step": 280
    },
    {
      "epoch": 14.96,
      "eval_accuracy": 0.8,
      "eval_loss": 0.9084436893463135,
      "eval_runtime": 1.0482,
      "eval_samples_per_second": 57.243,
      "eval_steps_per_second": 1.908,
      "step": 288
    },
    {
      "epoch": 15.06,
      "learning_rate": 3.5803324099722994e-05,
      "loss": 0.0891,
      "step": 290
    },
    {
      "epoch": 15.58,
      "learning_rate": 3.5041551246537395e-05,
      "loss": 0.0813,
      "step": 300
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.8688091039657593,
      "eval_runtime": 1.0293,
      "eval_samples_per_second": 58.294,
      "eval_steps_per_second": 1.943,
      "step": 308
    },
    {
      "epoch": 16.1,
      "learning_rate": 3.4279778393351804e-05,
      "loss": 0.0928,
      "step": 310
    },
    {
      "epoch": 16.62,
      "learning_rate": 3.3518005540166206e-05,
      "loss": 0.1017,
      "step": 320
    },
    {
      "epoch": 16.99,
      "eval_accuracy": 0.8,
      "eval_loss": 0.8609302043914795,
      "eval_runtime": 1.0686,
      "eval_samples_per_second": 56.148,
      "eval_steps_per_second": 1.872,
      "step": 327
    },
    {
      "epoch": 17.14,
      "learning_rate": 3.275623268698061e-05,
      "loss": 0.0703,
      "step": 330
    },
    {
      "epoch": 17.66,
      "learning_rate": 3.1994459833795016e-05,
      "loss": 0.076,
      "step": 340
    },
    {
      "epoch": 17.97,
      "eval_accuracy": 0.8,
      "eval_loss": 0.9015089273452759,
      "eval_runtime": 1.0675,
      "eval_samples_per_second": 56.207,
      "eval_steps_per_second": 1.874,
      "step": 346
    },
    {
      "epoch": 18.18,
      "learning_rate": 3.123268698060942e-05,
      "loss": 0.0667,
      "step": 350
    },
    {
      "epoch": 18.7,
      "learning_rate": 3.0470914127423827e-05,
      "loss": 0.0726,
      "step": 360
    },
    {
      "epoch": 18.96,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.9918167591094971,
      "eval_runtime": 1.0851,
      "eval_samples_per_second": 55.292,
      "eval_steps_per_second": 1.843,
      "step": 365
    },
    {
      "epoch": 19.22,
      "learning_rate": 2.9709141274238225e-05,
      "loss": 0.0787,
      "step": 370
    },
    {
      "epoch": 19.74,
      "learning_rate": 2.894736842105263e-05,
      "loss": 0.0549,
      "step": 380
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.9064477682113647,
      "eval_runtime": 1.0254,
      "eval_samples_per_second": 58.513,
      "eval_steps_per_second": 1.95,
      "step": 385
    },
    {
      "epoch": 20.26,
      "learning_rate": 2.8185595567867035e-05,
      "loss": 0.057,
      "step": 390
    },
    {
      "epoch": 20.78,
      "learning_rate": 2.742382271468144e-05,
      "loss": 0.0676,
      "step": 400
    },
    {
      "epoch": 20.99,
      "eval_accuracy": 0.75,
      "eval_loss": 0.881874680519104,
      "eval_runtime": 1.3066,
      "eval_samples_per_second": 45.919,
      "eval_steps_per_second": 1.531,
      "step": 404
    },
    {
      "epoch": 21.3,
      "learning_rate": 2.6662049861495846e-05,
      "loss": 0.0518,
      "step": 410
    },
    {
      "epoch": 21.82,
      "learning_rate": 2.590027700831025e-05,
      "loss": 0.0717,
      "step": 420
    },
    {
      "epoch": 21.97,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.8607038259506226,
      "eval_runtime": 1.0427,
      "eval_samples_per_second": 57.543,
      "eval_steps_per_second": 1.918,
      "step": 423
    },
    {
      "epoch": 22.34,
      "learning_rate": 2.5138504155124656e-05,
      "loss": 0.0593,
      "step": 430
    },
    {
      "epoch": 22.86,
      "learning_rate": 2.4376731301939058e-05,
      "loss": 0.0547,
      "step": 440
    },
    {
      "epoch": 22.96,
      "eval_accuracy": 0.8,
      "eval_loss": 0.8859201073646545,
      "eval_runtime": 1.0387,
      "eval_samples_per_second": 57.765,
      "eval_steps_per_second": 1.926,
      "step": 442
    },
    {
      "epoch": 23.38,
      "learning_rate": 2.3614958448753463e-05,
      "loss": 0.0451,
      "step": 450
    },
    {
      "epoch": 23.9,
      "learning_rate": 2.2853185595567865e-05,
      "loss": 0.0466,
      "step": 460
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9327951073646545,
      "eval_runtime": 1.1454,
      "eval_samples_per_second": 52.385,
      "eval_steps_per_second": 1.746,
      "step": 462
    },
    {
      "epoch": 24.42,
      "learning_rate": 2.2091412742382273e-05,
      "loss": 0.0535,
      "step": 470
    },
    {
      "epoch": 24.94,
      "learning_rate": 2.132963988919668e-05,
      "loss": 0.0715,
      "step": 480
    },
    {
      "epoch": 24.99,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.0178091526031494,
      "eval_runtime": 1.0507,
      "eval_samples_per_second": 57.103,
      "eval_steps_per_second": 1.903,
      "step": 481
    },
    {
      "epoch": 25.45,
      "learning_rate": 2.056786703601108e-05,
      "loss": 0.0503,
      "step": 490
    },
    {
      "epoch": 25.97,
      "learning_rate": 1.9806094182825486e-05,
      "loss": 0.0446,
      "step": 500
    },
    {
      "epoch": 25.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.009350061416626,
      "eval_runtime": 1.0259,
      "eval_samples_per_second": 58.484,
      "eval_steps_per_second": 1.949,
      "step": 500
    },
    {
      "epoch": 26.49,
      "learning_rate": 1.904432132963989e-05,
      "loss": 0.0468,
      "step": 510
    },
    {
      "epoch": 26.96,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9175390005111694,
      "eval_runtime": 1.0785,
      "eval_samples_per_second": 55.634,
      "eval_steps_per_second": 1.854,
      "step": 519
    },
    {
      "epoch": 27.01,
      "learning_rate": 1.8282548476454293e-05,
      "loss": 0.0523,
      "step": 520
    },
    {
      "epoch": 27.53,
      "learning_rate": 1.7520775623268698e-05,
      "loss": 0.0458,
      "step": 530
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.8580291271209717,
      "eval_runtime": 1.0307,
      "eval_samples_per_second": 58.214,
      "eval_steps_per_second": 1.94,
      "step": 539
    },
    {
      "epoch": 28.05,
      "learning_rate": 1.6759002770083103e-05,
      "loss": 0.0564,
      "step": 540
    },
    {
      "epoch": 28.57,
      "learning_rate": 1.5997229916897508e-05,
      "loss": 0.0392,
      "step": 550
    },
    {
      "epoch": 28.99,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.0588656663894653,
      "eval_runtime": 1.0179,
      "eval_samples_per_second": 58.945,
      "eval_steps_per_second": 1.965,
      "step": 558
    },
    {
      "epoch": 29.09,
      "learning_rate": 1.5235457063711913e-05,
      "loss": 0.0302,
      "step": 560
    },
    {
      "epoch": 29.61,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 0.0469,
      "step": 570
    },
    {
      "epoch": 29.97,
      "eval_accuracy": 0.8,
      "eval_loss": 1.0904656648635864,
      "eval_runtime": 1.0314,
      "eval_samples_per_second": 58.173,
      "eval_steps_per_second": 1.939,
      "step": 577
    },
    {
      "epoch": 30.13,
      "learning_rate": 1.371191135734072e-05,
      "loss": 0.0365,
      "step": 580
    },
    {
      "epoch": 30.65,
      "learning_rate": 1.2950138504155125e-05,
      "loss": 0.0425,
      "step": 590
    },
    {
      "epoch": 30.96,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.0077567100524902,
      "eval_runtime": 1.0942,
      "eval_samples_per_second": 54.834,
      "eval_steps_per_second": 1.828,
      "step": 596
    },
    {
      "epoch": 31.17,
      "learning_rate": 1.2188365650969529e-05,
      "loss": 0.0503,
      "step": 600
    },
    {
      "epoch": 31.69,
      "learning_rate": 1.1426592797783932e-05,
      "loss": 0.0464,
      "step": 610
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.0206407308578491,
      "eval_runtime": 1.0927,
      "eval_samples_per_second": 54.909,
      "eval_steps_per_second": 1.83,
      "step": 616
    },
    {
      "epoch": 32.21,
      "learning_rate": 1.066481994459834e-05,
      "loss": 0.0315,
      "step": 620
    },
    {
      "epoch": 32.73,
      "learning_rate": 9.903047091412743e-06,
      "loss": 0.0336,
      "step": 630
    },
    {
      "epoch": 32.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9653330445289612,
      "eval_runtime": 1.0833,
      "eval_samples_per_second": 55.384,
      "eval_steps_per_second": 1.846,
      "step": 635
    },
    {
      "epoch": 33.25,
      "learning_rate": 9.141274238227146e-06,
      "loss": 0.0368,
      "step": 640
    },
    {
      "epoch": 33.77,
      "learning_rate": 8.379501385041551e-06,
      "loss": 0.0302,
      "step": 650
    },
    {
      "epoch": 33.97,
      "eval_accuracy": 0.8,
      "eval_loss": 0.9574002027511597,
      "eval_runtime": 1.0375,
      "eval_samples_per_second": 57.833,
      "eval_steps_per_second": 1.928,
      "step": 654
    },
    {
      "epoch": 34.29,
      "learning_rate": 7.617728531855957e-06,
      "loss": 0.03,
      "step": 660
    },
    {
      "epoch": 34.81,
      "learning_rate": 6.85595567867036e-06,
      "loss": 0.0353,
      "step": 670
    },
    {
      "epoch": 34.96,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.962119996547699,
      "eval_runtime": 1.0464,
      "eval_samples_per_second": 57.339,
      "eval_steps_per_second": 1.911,
      "step": 673
    },
    {
      "epoch": 35.32,
      "learning_rate": 6.0941828254847645e-06,
      "loss": 0.0372,
      "step": 680
    },
    {
      "epoch": 35.84,
      "learning_rate": 5.33240997229917e-06,
      "loss": 0.0344,
      "step": 690
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9792068600654602,
      "eval_runtime": 1.0489,
      "eval_samples_per_second": 57.204,
      "eval_steps_per_second": 1.907,
      "step": 693
    },
    {
      "epoch": 36.36,
      "learning_rate": 4.570637119113573e-06,
      "loss": 0.036,
      "step": 700
    },
    {
      "epoch": 36.88,
      "learning_rate": 3.8088642659279783e-06,
      "loss": 0.0195,
      "step": 710
    },
    {
      "epoch": 36.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9458684325218201,
      "eval_runtime": 1.0129,
      "eval_samples_per_second": 59.236,
      "eval_steps_per_second": 1.975,
      "step": 712
    },
    {
      "epoch": 37.4,
      "learning_rate": 3.0470914127423822e-06,
      "loss": 0.0349,
      "step": 720
    },
    {
      "epoch": 37.92,
      "learning_rate": 2.2853185595567866e-06,
      "loss": 0.031,
      "step": 730
    },
    {
      "epoch": 37.97,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9487714767456055,
      "eval_runtime": 1.0492,
      "eval_samples_per_second": 57.189,
      "eval_steps_per_second": 1.906,
      "step": 731
    },
    {
      "epoch": 38.44,
      "learning_rate": 1.5235457063711911e-06,
      "loss": 0.0221,
      "step": 740
    },
    {
      "epoch": 38.96,
      "learning_rate": 7.617728531855956e-07,
      "loss": 0.0224,
      "step": 750
    },
    {
      "epoch": 38.96,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.943988561630249,
      "eval_runtime": 1.0748,
      "eval_samples_per_second": 55.822,
      "eval_steps_per_second": 1.861,
      "step": 750
    },
    {
      "epoch": 39.48,
      "learning_rate": 0.0,
      "loss": 0.0309,
      "step": 760
    },
    {
      "epoch": 39.48,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9447524547576904,
      "eval_runtime": 1.0329,
      "eval_samples_per_second": 58.09,
      "eval_steps_per_second": 1.936,
      "step": 760
    },
    {
      "epoch": 39.48,
      "step": 760,
      "total_flos": 7.496244493905936e+18,
      "train_loss": 0.1845952101443943,
      "train_runtime": 1526.0361,
      "train_samples_per_second": 64.219,
      "train_steps_per_second": 0.498
    }
  ],
  "logging_steps": 10,
  "max_steps": 760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 7.496244493905936e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}