{
  "best_metric": 0.8981937602627258,
  "best_model_checkpoint": "dinov2-small-imagenet1k-1-layer-finetuned-noh\\checkpoint-46",
  "epoch": 9.577777777777778,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 349.2164306640625,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.611,
      "step": 10
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 174.1605682373047,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.4924,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8325123152709359,
      "eval_loss": 0.5212485194206238,
      "eval_runtime": 65.7326,
      "eval_samples_per_second": 9.265,
      "eval_steps_per_second": 0.593,
      "step": 23
    },
    {
      "epoch": 1.3111111111111111,
      "grad_norm": 74.73553466796875,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.5101,
      "step": 30
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 69.5820083618164,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.5732,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8981937602627258,
      "eval_loss": 0.33657345175743103,
      "eval_runtime": 69.7181,
      "eval_samples_per_second": 8.735,
      "eval_steps_per_second": 0.559,
      "step": 46
    },
    {
      "epoch": 2.1777777777777776,
      "grad_norm": 36.10708999633789,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.3947,
      "step": 50
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 220.30311584472656,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.5639,
      "step": 60
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8489326765188834,
      "eval_loss": 0.3906978666782379,
      "eval_runtime": 70.3418,
      "eval_samples_per_second": 8.658,
      "eval_steps_per_second": 0.554,
      "step": 69
    },
    {
      "epoch": 3.0444444444444443,
      "grad_norm": 58.00751495361328,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.4467,
      "step": 70
    },
    {
      "epoch": 3.488888888888889,
      "grad_norm": 61.74748992919922,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.521,
      "step": 80
    },
    {
      "epoch": 3.9333333333333336,
      "grad_norm": 52.76782989501953,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.4759,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8817733990147784,
      "eval_loss": 0.3481961786746979,
      "eval_runtime": 69.182,
      "eval_samples_per_second": 8.803,
      "eval_steps_per_second": 0.564,
      "step": 92
    },
    {
      "epoch": 4.355555555555555,
      "grad_norm": 83.23902893066406,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.3662,
      "step": 100
    },
    {
      "epoch": 4.8,
      "grad_norm": 40.003013610839844,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3757,
      "step": 110
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8275862068965517,
      "eval_loss": 0.392085462808609,
      "eval_runtime": 67.7229,
      "eval_samples_per_second": 8.993,
      "eval_steps_per_second": 0.576,
      "step": 115
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 54.831241607666016,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.3124,
      "step": 120
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 45.63344192504883,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.3356,
      "step": 130
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.896551724137931,
      "eval_loss": 0.3183760344982147,
      "eval_runtime": 67.9276,
      "eval_samples_per_second": 8.965,
      "eval_steps_per_second": 0.574,
      "step": 138
    },
    {
      "epoch": 6.088888888888889,
      "grad_norm": 52.133689880371094,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.2883,
      "step": 140
    },
    {
      "epoch": 6.533333333333333,
      "grad_norm": 35.971134185791016,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.3177,
      "step": 150
    },
    {
      "epoch": 6.977777777777778,
      "grad_norm": 57.66413879394531,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2521,
      "step": 160
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.39921438694000244,
      "eval_runtime": 65.0226,
      "eval_samples_per_second": 9.366,
      "eval_steps_per_second": 0.6,
      "step": 161
    },
    {
      "epoch": 7.4,
      "grad_norm": 74.5230484008789,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.2193,
      "step": 170
    },
    {
      "epoch": 7.844444444444444,
      "grad_norm": 80.1707534790039,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.2981,
      "step": 180
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8702791461412152,
      "eval_loss": 0.3903738856315613,
      "eval_runtime": 67.5642,
      "eval_samples_per_second": 9.014,
      "eval_steps_per_second": 0.577,
      "step": 184
    },
    {
      "epoch": 8.266666666666667,
      "grad_norm": 82.75043487548828,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.1812,
      "step": 190
    },
    {
      "epoch": 8.71111111111111,
      "grad_norm": 54.377445220947266,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.2302,
      "step": 200
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8719211822660099,
      "eval_loss": 0.3987477421760559,
      "eval_runtime": 67.5557,
      "eval_samples_per_second": 9.015,
      "eval_steps_per_second": 0.577,
      "step": 207
    },
    {
      "epoch": 9.133333333333333,
      "grad_norm": 65.00289916992188,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.2021,
      "step": 210
    },
    {
      "epoch": 9.577777777777778,
      "grad_norm": 52.96971130371094,
      "learning_rate": 0.0,
      "loss": 0.1979,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "eval_accuracy": 0.8604269293924466,
      "eval_loss": 0.4129030406475067,
      "eval_runtime": 66.9132,
      "eval_samples_per_second": 9.101,
      "eval_steps_per_second": 0.583,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "step": 220,
      "total_flos": 2.7458744422511e+17,
      "train_loss": 0.37115615768866106,
      "train_runtime": 2504.6096,
      "train_samples_per_second": 5.745,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7458744422511e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}