{
"best_metric": 0.8181818181818182,
"best_model_checkpoint": "2024_08_15_swinv2-base-patch4-window8-256/checkpoint-174",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 174,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11494252873563218,
"grad_norm": 40.21982192993164,
"learning_rate": 5.555555555555557e-06,
"loss": 0.369,
"step": 10
},
{
"epoch": 0.22988505747126436,
"grad_norm": 31.26569175720215,
"learning_rate": 9.871794871794872e-06,
"loss": 0.5608,
"step": 20
},
{
"epoch": 0.3448275862068966,
"grad_norm": 24.969097137451172,
"learning_rate": 9.230769230769232e-06,
"loss": 0.8375,
"step": 30
},
{
"epoch": 0.45977011494252873,
"grad_norm": 3.7851438522338867,
"learning_rate": 8.58974358974359e-06,
"loss": 0.4851,
"step": 40
},
{
"epoch": 0.5747126436781609,
"grad_norm": 3.543433904647827,
"learning_rate": 7.948717948717949e-06,
"loss": 0.507,
"step": 50
},
{
"epoch": 0.6896551724137931,
"grad_norm": 3.535554885864258,
"learning_rate": 7.307692307692308e-06,
"loss": 0.4749,
"step": 60
},
{
"epoch": 0.8045977011494253,
"grad_norm": 3.087676763534546,
"learning_rate": 6.666666666666667e-06,
"loss": 0.2292,
"step": 70
},
{
"epoch": 0.9195402298850575,
"grad_norm": 16.41364288330078,
"learning_rate": 6.025641025641026e-06,
"loss": 0.4908,
"step": 80
},
{
"epoch": 1.0,
"eval_accuracy": 0.7954545454545454,
"eval_loss": 0.48821794986724854,
"eval_runtime": 28.1268,
"eval_samples_per_second": 3.129,
"eval_steps_per_second": 3.129,
"step": 87
},
{
"epoch": 1.0344827586206897,
"grad_norm": 21.090808868408203,
"learning_rate": 5.384615384615385e-06,
"loss": 0.2291,
"step": 90
},
{
"epoch": 1.1494252873563218,
"grad_norm": 48.51396179199219,
"learning_rate": 4.743589743589744e-06,
"loss": 0.5361,
"step": 100
},
{
"epoch": 1.264367816091954,
"grad_norm": 1.5283576250076294,
"learning_rate": 4.102564102564103e-06,
"loss": 0.4894,
"step": 110
},
{
"epoch": 1.3793103448275863,
"grad_norm": 2.1673336029052734,
"learning_rate": 3.4615384615384617e-06,
"loss": 0.444,
"step": 120
},
{
"epoch": 1.4942528735632183,
"grad_norm": 1.3773008584976196,
"learning_rate": 2.8205128205128207e-06,
"loss": 0.2406,
"step": 130
},
{
"epoch": 1.6091954022988506,
"grad_norm": 4.856438636779785,
"learning_rate": 2.1794871794871797e-06,
"loss": 0.4014,
"step": 140
},
{
"epoch": 1.7241379310344827,
"grad_norm": 22.820289611816406,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.8933,
"step": 150
},
{
"epoch": 1.839080459770115,
"grad_norm": 47.16185760498047,
"learning_rate": 8.974358974358975e-07,
"loss": 0.319,
"step": 160
},
{
"epoch": 1.9540229885057472,
"grad_norm": 4.938830375671387,
"learning_rate": 2.564102564102564e-07,
"loss": 1.0014,
"step": 170
},
{
"epoch": 2.0,
"eval_accuracy": 0.8181818181818182,
"eval_loss": 0.49721577763557434,
"eval_runtime": 28.8569,
"eval_samples_per_second": 3.05,
"eval_steps_per_second": 3.05,
"step": 174
},
{
"epoch": 2.0,
"step": 174,
"total_flos": 7.134454803647693e+16,
"train_loss": 0.4921485822776268,
"train_runtime": 389.2579,
"train_samples_per_second": 1.788,
"train_steps_per_second": 0.447
}
],
"logging_steps": 10,
"max_steps": 174,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.134454803647693e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}